edge_vol_int.c
/****************************************************************************** ** Copyright (c) 2016-2019, Intel Corporation ** ** All rights reserved. ** ** ** ** Redistribution and use in source and binary forms, with or without ** ** modification, are permitted provided that the following conditions ** ** are met: ** ** 1. Redistributions of source code must retain the above copyright ** ** notice, this list of conditions and the following disclaimer. ** ** 2. Redistributions in binary form must reproduce the above copyright ** ** notice, this list of conditions and the following disclaimer in the ** ** documentation and/or other materials provided with the distribution. ** ** 3. Neither the name of the copyright holder nor the names of its ** ** contributors may be used to endorse or promote products derived ** ** from this software without specific prior written permission. ** ** ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ** ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ** ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ** ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ** ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ** ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED ** ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR ** ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF ** ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING ** ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ** ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) 
******************************************************************************/ #include "edge_proxy_common.h" #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /*#define EDGE_HP_1G*/ /*#define HANDLE_AMOK*/ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif static void* edge_hp_malloc( size_t nbytes, size_t alignment ) { void* ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if ( nbytes > num_large_pages*1073741824L ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if ( nbytes > num_large_pages*2097152UL ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc( nbytes, alignment ); #endif return ret_ptr; } static void edge_hp_free( void* ptr, size_t nbytes ) { LIBXSMM_UNUSED( nbytes ); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free( ptr ); #endif } #if defined(__AVX512F__) static void matMulFusedAC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), _mm512_set1_pd( i_beta ) ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { vc = _mm512_fmadd_pd( _mm512_set1_pd( i_b[l_k*i_ldB + l_n] ), _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } static void matMulFusedBC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), _mm512_set1_pd( i_beta ) ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { vc = _mm512_fmadd_pd( _mm512_set1_pd( i_a[l_m*i_ldA + l_k] ), _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } #endif static void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { time_avg += i_runtimes[8*i]; } } time_avg = time_avg/((double)(i_workers-io_amoks[8*i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { if ( i_runtimes[8*i] > time_avg*1.07 ) { /* this is the amok condition */ io_amoks[8*i_workers]++; io_amoks[8*i] = 1; } } } } static void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8*i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8*i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for ( l_z = 0; l_z < i_mytid; l_z++) { if ( i_amoks[8*l_z] != 0 ) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset+1) * l_chunk < i_worksize) ? 
((l_tid_offset+1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char* argv[]) { char* mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double* q; double* qt; double* qs; double* star; double* global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants*num_quants; double* l_total_thread; double* l_cur_thread_time; double time_max; double time_min; double time_avg; size_t* amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double)); l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double)); amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t)); for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values ).dmm; b_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values ).dmm; c_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values ).dmm; st_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values ).dmm; if ( a_kernel == 0 ) { printf("a kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("b kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("c kernel could not be built -> exit!"); exit(-1); } if ( st_kernel == 0 ) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 ); memcpy( onegcode, (void*) a_kernel, 1505 ); memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 ); memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 ); memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 ); a_kernel = (libxsmm_dmmfunction)onegcode; b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64); c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128); st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) ); star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) ); global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz*3; j++) { star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes*num_modes; j++) { global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8*l_num_threads]) { cur_amoks = amoks[8*l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) ); a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) ); b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #else matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend ); l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend ); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect( l_cur_thread_time, amoks, l_num_threads ); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if( amoks[8*i] == 0 ) { if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i]; if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i]; time_avg += l_total_thread[8*i]; } } time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads])); flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]); for ( i = 0; i < (int)l_num_threads; i++ ) { if ( amoks[8*i] != 0 ) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * 
(double)num_reps) / (l_total * 1024.0*1024.0*1024.0) ); printf("done!\n\n"); /* some empty lines at the end */ printf("\n\n"); return 0; }
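A note on HANDLE_AMOK above: amok_detect flags threads whose per-iteration runtime exceeds 1.07x the average of the unflagged threads, and amok_balance then re-splits the element range over the survivors by ceiling division. The following standalone sketch reproduces just that re-split arithmetic; the function name split and the 4-thread scenario are illustrative, not part of the proxy app.

#include <stdio.h>

/* hedged sketch of the re-split in amok_balance: threads with flag != 0
 * get an empty [start,end) range; the rest share the work by ceiling
 * division over the number of surviving threads */
static void split(const int *amok, int workers, int worksize,
                  int tid, int *start, int *end)
{
  int survivors = 0, rank = 0, i;
  for (i = 0; i < workers; i++) {
    if (amok[i] == 0) survivors++;
    if (i < tid && amok[i] != 0) rank++; /* amok threads before me */
  }
  if (amok[tid] != 0) { *start = 0; *end = 0; return; }
  rank = tid - rank; /* my rank among surviving threads */
  {
    int chunk = (worksize + survivors - 1) / survivors; /* ceiling division */
    *start = (rank * chunk < worksize) ? rank * chunk : worksize;
    *end   = ((rank + 1) * chunk < worksize) ? (rank + 1) * chunk : worksize;
  }
}

int main(void)
{
  int amok[4] = { 0, 1, 0, 0 }; /* thread 1 flagged as amok */
  int t;
  for (t = 0; t < 4; t++) {
    int s, e;
    split(amok, 4, 100, t, &s, &e);
    printf("thread %d: [%d,%d)\n", t, s, e); /* [0,34) [0,0) [34,68) [68,100) */
  }
  return 0;
}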
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-2,4)),ceild(3*t1-6,8)),ceild(24*t2-Nz-19,32)),ceild(4*t3-Ny-19,32));t4<=min(min(min(min(floord(4*Nt+Nx-9,32),floord(12*t1+Nx+15,32)),floord(24*t2+Nx+11,32)),floord(4*t3+Nx-9,32)),floord(24*t1-24*t2+Nz+Nx+13,32));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),8*t4+6);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) 
+ (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
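For orientation, the tiled CLooG nest above computes a 25-point, radius-4 star stencil with 13 axis-symmetric variable coefficients, double-buffered over t % 2 (the -4*t5 terms only shift indices back into array coordinates). A hedged, untiled sketch of one time step in the same data layout; the function name ref_step and the loop structure are illustrative, not part of the generated file.

/* hedged reference for the update inside the tiled nest above:
 * coef[0] weighs the center point; for r = 1..4, coef[3r-2], coef[3r-1],
 * and coef[3r] weigh the +/-r neighbors along z, y, x respectively */
static void ref_step(int t, int Nz, int Ny, int Nx,
                     double ****A, double ****coef)
{
  int i, j, k, r;
  for (i = 4; i < Nz - 4; i++) {
    for (j = 4; j < Ny - 4; j++) {
      for (k = 4; k < Nx - 4; k++) {
        double v = coef[0][i][j][k] * A[t % 2][i][j][k];
        for (r = 1; r <= 4; r++) {
          v += coef[3*r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
          v += coef[3*r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
          v += coef[3*r    ][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
        }
        A[(t + 1) % 2][i][j][k] = v;
      }
    }
  }
}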
convolution_winograd_transform_pack4_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_winograd63_transform_input_pack4_fp16sa_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt) { const int w = bottom_blob.w; const int h = bottom_blob.h; const int inch = bottom_blob.c; const int w_tiles = (w - 2) / 6; const int h_tiles = (h - 2) / 6; const int tiles = w_tiles * h_tiles; // const float itm[8][8] = { // {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f}, // // {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f}, // {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f}, // // {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f}, // {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f}, // // {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f}, // {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f}, // // {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f} // }; // 0 = r00 - r06 + (r04 - r02) * 5.25 // 7 = r07 - r01 + (r03 - r05) * 5.25 // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05) // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05) // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2) // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2) // reuse r04 * 1.25 // reuse r03 * 2.5 // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5) // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5) #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); __fp16 tmp[8][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* r0 = img0.row<const __fp16>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float16x4_t _r00 = vld1_f16(r0); float16x4_t _r01 = vld1_f16(r0 + 4); float16x4_t _r02 = vld1_f16(r0 + 8); float16x4_t _r03 = vld1_f16(r0 + 12); float16x4_t _r04 = vld1_f16(r0 + 16); float16x4_t _r05 = vld1_f16(r0 + 20); float16x4_t _r06 = vld1_f16(r0 + 24); float16x4_t _r07 = vld1_f16(r0 + 28); float16x4_t _tmp0m = vfma_n_f16(vsub_f16(_r00, _r06), vsub_f16(_r04, _r02), 5.25f); float16x4_t _tmp7m = vfma_n_f16(vsub_f16(_r07, _r01), vsub_f16(_r03, _r05), 5.25f); float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_r02, _r06), _r04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_r01, _r05), _r03, 4.25f); float16x4_t _tmp1m = vadd_f16(_tmp12a, _tmp12b); float16x4_t _tmp2m = vsub_f16(_tmp12a, _tmp12b); float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_r06, _r02, 0.25f), _r04, 1.25f); float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 0.5f), _r03, 2.5f), _r05, 2.f); float16x4_t _tmp3m = vadd_f16(_tmp34a, _tmp34b); float16x4_t _tmp4m = vsub_f16(_tmp34a, _tmp34b); float16x4_t _tmp56a = vfma_n_f16(_r06, vfms_n_f16(_r02, _r04, 1.25f), 4.f); float16x4_t _tmp56b = 
vfma_n_f16(vfms_n_f16(vmul_n_f16(_r01, 2.f), _r03, 2.5f), _r05, 0.5f); float16x4_t _tmp5m = vadd_f16(_tmp56a, _tmp56b); float16x4_t _tmp6m = vsub_f16(_tmp56a, _tmp56b); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[4][m], _tmp4m); vst1_f16(tmp[5][m], _tmp5m); vst1_f16(tmp[6][m], _tmp6m); vst1_f16(tmp[7][m], _tmp7m); r0 += w * 4; } __fp16* r0_tm_0 = (__fp16*)img0_tm + (i * w_tiles + j) * 4; __fp16* r0_tm_1 = r0_tm_0 + tiles * 4; __fp16* r0_tm_2 = r0_tm_0 + tiles * 8; __fp16* r0_tm_3 = r0_tm_0 + tiles * 12; __fp16* r0_tm_4 = r0_tm_0 + tiles * 16; __fp16* r0_tm_5 = r0_tm_0 + tiles * 20; __fp16* r0_tm_6 = r0_tm_0 + tiles * 24; __fp16* r0_tm_7 = r0_tm_0 + tiles * 28; for (int m = 0; m < 8; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _r0tm0 = vfma_n_f16(vsub_f16(_tmp00, _tmp06), vsub_f16(_tmp04, _tmp02), 5.25f); float16x4_t _r0tm7 = vfma_n_f16(vsub_f16(_tmp07, _tmp01), vsub_f16(_tmp03, _tmp05), 5.25f); float16x4_t _tmp12a = vfms_n_f16(vadd_f16(_tmp02, _tmp06), _tmp04, 4.25f); float16x4_t _tmp12b = vfms_n_f16(vadd_f16(_tmp01, _tmp05), _tmp03, 4.25f); float16x4_t _r0tm1 = vadd_f16(_tmp12a, _tmp12b); float16x4_t _r0tm2 = vsub_f16(_tmp12a, _tmp12b); float16x4_t _tmp34a = vfms_n_f16(vfma_n_f16(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f); float16x4_t _tmp34b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f); float16x4_t _r0tm3 = vadd_f16(_tmp34a, _tmp34b); float16x4_t _r0tm4 = vsub_f16(_tmp34a, _tmp34b); float16x4_t _tmp56a = vfma_n_f16(_tmp06, vfms_n_f16(_tmp02, _tmp04, 1.25f), 4.f); float16x4_t _tmp56b = vfma_n_f16(vfms_n_f16(vmul_n_f16(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f); float16x4_t _r0tm5 = vadd_f16(_tmp56a, _tmp56b); float16x4_t _r0tm6 = vsub_f16(_tmp56a, _tmp56b); vst1_f16(r0_tm_0, _r0tm0); vst1_f16(r0_tm_1, _r0tm1); vst1_f16(r0_tm_2, _r0tm2); vst1_f16(r0_tm_3, _r0tm3); vst1_f16(r0_tm_4, _r0tm4); vst1_f16(r0_tm_5, _r0tm5); vst1_f16(r0_tm_6, _r0tm6); vst1_f16(r0_tm_7, _r0tm7); r0_tm_0 += tiles * 32; r0_tm_1 += tiles * 32; r0_tm_2 += tiles * 32; r0_tm_3 += tiles * 32; r0_tm_4 += tiles * 32; r0_tm_5 += tiles * 32; r0_tm_6 += tiles * 32; r0_tm_7 += tiles * 32; } } } } } static void conv3x3s1_winograd63_transform_output_pack4_fp16sa_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt) { const int outw = top_blob.w; const int outh = top_blob.h; const int outch = top_blob.c; const int w_tiles = outw / 6; const int h_tiles = outh / 6; const int tiles = w_tiles * h_tiles; const __fp16* biasptr = bias; // const float otm[6][8] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f} // }; // 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32 // 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16 // 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8 // 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4 // 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2 // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6) 
#pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { const Mat out0_tm = top_blob_tm.channel(p); Mat out0 = top_blob.channel(p); float16x4_t _bias0 = biasptr ? vld1_f16(biasptr + p * 4) : vdup_n_f16(0.f); __fp16 tmp[6][8][4]; // tile for (int i = 0; i < h_tiles; i++) { for (int j = 0; j < w_tiles; j++) { const __fp16* output0_tm_0 = (const __fp16*)out0_tm + (i * w_tiles + j) * 4; const __fp16* output0_tm_1 = output0_tm_0 + tiles * 4; const __fp16* output0_tm_2 = output0_tm_0 + tiles * 8; const __fp16* output0_tm_3 = output0_tm_0 + tiles * 12; const __fp16* output0_tm_4 = output0_tm_0 + tiles * 16; const __fp16* output0_tm_5 = output0_tm_0 + tiles * 20; const __fp16* output0_tm_6 = output0_tm_0 + tiles * 24; const __fp16* output0_tm_7 = output0_tm_0 + tiles * 28; __fp16* output0 = out0.row<__fp16>(i * 6) + (j * 6) * 4; for (int m = 0; m < 8; m++) { float16x4_t _out0tm0 = vld1_f16(output0_tm_0); float16x4_t _out0tm1 = vld1_f16(output0_tm_1); float16x4_t _out0tm2 = vld1_f16(output0_tm_2); float16x4_t _out0tm3 = vld1_f16(output0_tm_3); float16x4_t _out0tm4 = vld1_f16(output0_tm_4); float16x4_t _out0tm5 = vld1_f16(output0_tm_5); float16x4_t _out0tm6 = vld1_f16(output0_tm_6); float16x4_t _out0tm7 = vld1_f16(output0_tm_7); float16x4_t _tmp024a = vadd_f16(_out0tm1, _out0tm2); float16x4_t _tmp135a = vsub_f16(_out0tm1, _out0tm2); float16x4_t _tmp024b = vadd_f16(_out0tm3, _out0tm4); float16x4_t _tmp135b = vsub_f16(_out0tm3, _out0tm4); float16x4_t _tmp024c = vadd_f16(_out0tm5, _out0tm6); float16x4_t _tmp135c = vsub_f16(_out0tm5, _out0tm6); float16x4_t _tmp0m = vadd_f16(vadd_f16(_out0tm0, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f)); float16x4_t _tmp2m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f); float16x4_t _tmp4m = vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f); float16x4_t _tmp1m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f); float16x4_t _tmp3m = vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f); float16x4_t _tmp5m = vadd_f16(vadd_f16(_out0tm7, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f)); vst1_f16(tmp[0][m], _tmp0m); vst1_f16(tmp[1][m], _tmp1m); vst1_f16(tmp[2][m], _tmp2m); vst1_f16(tmp[3][m], _tmp3m); vst1_f16(tmp[4][m], _tmp4m); vst1_f16(tmp[5][m], _tmp5m); output0_tm_0 += tiles * 32; output0_tm_1 += tiles * 32; output0_tm_2 += tiles * 32; output0_tm_3 += tiles * 32; output0_tm_4 += tiles * 32; output0_tm_5 += tiles * 32; output0_tm_6 += tiles * 32; output0_tm_7 += tiles * 32; } for (int m = 0; m < 6; m++) { float16x4_t _tmp00 = vld1_f16(tmp[m][0]); float16x4_t _tmp01 = vld1_f16(tmp[m][1]); float16x4_t _tmp02 = vld1_f16(tmp[m][2]); float16x4_t _tmp03 = vld1_f16(tmp[m][3]); float16x4_t _tmp04 = vld1_f16(tmp[m][4]); float16x4_t _tmp05 = vld1_f16(tmp[m][5]); float16x4_t _tmp06 = vld1_f16(tmp[m][6]); float16x4_t _tmp07 = vld1_f16(tmp[m][7]); float16x4_t _tmp024a = vadd_f16(_tmp01, _tmp02); float16x4_t _tmp135a = vsub_f16(_tmp01, _tmp02); float16x4_t _tmp024b = vadd_f16(_tmp03, _tmp04); float16x4_t _tmp135b = vsub_f16(_tmp03, _tmp04); float16x4_t _tmp024c = vadd_f16(_tmp05, _tmp06); float16x4_t _tmp135c = vsub_f16(_tmp05, _tmp06); float16x4_t _out00 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp00, _tmp024a), vfma_n_f16(_tmp024b, _tmp024c, 32.f))); float16x4_t _out02 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f)); float16x4_t _out04 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f)); float16x4_t _out01 = vadd_f16(_bias0, 
vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f)); float16x4_t _out03 = vadd_f16(_bias0, vfma_n_f16(vfma_n_f16(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f)); float16x4_t _out05 = vadd_f16(_bias0, vadd_f16(vadd_f16(_tmp07, _tmp135a), vfma_n_f16(_tmp135c, _tmp135b, 32.f))); vst1_f16(output0, _out00); vst1_f16(output0 + 4, _out01); vst1_f16(output0 + 8, _out02); vst1_f16(output0 + 12, _out03); vst1_f16(output0 + 16, _out04); vst1_f16(output0 + 20, _out05); output0 += outw * 4; } } } } }
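Both transforms above apply the same 8-tap row combinations twice, once per row and once per column, four fp16 lanes at a time. A scalar sketch of one row of the input transform, written directly from the itm coefficients in the comment block (plain float C; the function name is illustrative):

/* hedged scalar version of one row of the winograd63 input transform:
 * in[0..7] is one row of the 8x8 input tile, out[0..7] the transformed row */
static void winograd63_input_row(const float in[8], float out[8])
{
    float tmp12a = in[2] + in[6] - in[4] * 4.25f;
    float tmp12b = in[1] + in[5] - in[3] * 4.25f;
    float tmp34a = in[6] + in[2] * 0.25f - in[4] * 1.25f;
    float tmp34b = in[1] * 0.5f - in[3] * 2.5f + in[5] * 2.f;
    float tmp56a = in[6] + (in[2] - in[4] * 1.25f) * 4.f;
    float tmp56b = in[1] * 2.f - in[3] * 2.5f + in[5] * 0.5f;

    out[0] = in[0] - in[6] + (in[4] - in[2]) * 5.25f;
    out[7] = in[7] - in[1] + (in[3] - in[5]) * 5.25f;
    out[1] = tmp12a + tmp12b;
    out[2] = tmp12a - tmp12b;
    out[3] = tmp34a + tmp34b;
    out[4] = tmp34a - tmp34b;
    out[5] = tmp56a + tmp56b;
    out[6] = tmp56a - tmp56b;
}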
8495.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4096x4096. */ #include "convolution-2d.h" /* Array initialization. */ static void init_array (int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj)) { // printf("Initializing Array\n"); int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { A[i][j] = ((DATA_TYPE) (i + j) / nj); } } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nj, DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nj; j++) { fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]); if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_conv2d(int ni, int nj, DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj)) { int i, j; #pragma scop #pragma omp target teams distribute dist_schedule(static, 16) private(j) for (i = 1; i < _PB_NI - 1; ++i) { for (j = 1; j < _PB_NJ - 1; ++j) { B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1] + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1] + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1]; } } #pragma endscop // printf("Kernel computation complete !!\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj); /* Initialize array(s). */ init_array (ni, nj, POLYBENCH_ARRAY(A)); /* Start timer. */ //polybench_start_instruments; polybench_timer_start(); /* Run kernel. */ kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B)); /* Stop and print timer. */ polybench_timer_stop(); polybench_timer_print(); //polybench_stop_instruments; //polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); return 0; }
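Stripped of the PolyBench macros, kernel_conv2d is a fixed-weight 3x3 convolution over the interior points. A macro-free sketch of the same arithmetic on a flat row-major array (assumes DATA_TYPE is double, per the header comment above; conv2d_ref is an illustrative name):

/* hedged macro-free version of kernel_conv2d: B gets the 3x3 weighted
 * sum of A's neighborhood; border rows and columns are left untouched */
static void conv2d_ref(int ni, int nj, const double *A, double *B)
{
  static const double w[3][3] = {
    { 0.2,  0.5, -0.8},
    {-0.3,  0.6, -0.9},
    { 0.4,  0.7,  0.1},
  };
  int i, j, di, dj;
  for (i = 1; i < ni - 1; i++)
    for (j = 1; j < nj - 1; j++) {
      double s = 0.0;
      for (di = -1; di <= 1; di++)
        for (dj = -1; dj <= 1; dj++)
          s += w[di + 1][dj + 1] * A[(i + di) * nj + (j + dj)];
      B[i * nj + j] = s;
    }
}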
GB_binop__bset_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bset_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__bset_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__bset_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__bset_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bset_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__bset_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint64) // C=scalar+B GB (_bind1st__bset_uint64) // C=scalar+B' GB (_bind1st_tran__bset_uint64) // C=A+scalar GB (_bind2nd__bset_uint64) // C=A'+scalar GB (_bind2nd_tran__bset_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_BITSET (aij, bij, uint64_t, 64) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITSET (x, y, uint64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BSET || GxB_NO_UINT64 || GxB_NO_BSET_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bset_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bset_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bset_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bset_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bset_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bset_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bset_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bset_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bset_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITSET (x, bij, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bset_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITSET (aij, y, uint64_t, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (x, aij, uint64_t, 64) ; \ } GrB_Info GB (_bind1st_tran__bset_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITSET (aij, y, uint64_t, 64) ; \ } GrB_Info GB (_bind2nd_tran__bset_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
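The _bind1st/_bind2nd kernels above are the scalar-binding pattern: the generated code fixes one operand of the binary operator to a scalar and streams over the other. A minimal standalone sketch of that pattern, using a stand-in operator f(x,y) = x | y in place of GB_BITSET (whose real definition lives in GB.h) and omitting the GBB bitmap test:

#include <stdint.h>
#include <stdio.h>

/* stand-in operator; the generated kernels above use GB_BITSET instead */
static uint64_t f(uint64_t x, uint64_t y) { return x | y; }

/* Cx = f(x, Bx): the scalar is bound to the first operand */
static void bind1st(uint64_t *Cx, uint64_t x, const uint64_t *Bx, int64_t n)
{
  for (int64_t p = 0; p < n; p++) Cx[p] = f(x, Bx[p]);
}

/* Cx = f(Ax, y): the scalar is bound to the second operand */
static void bind2nd(uint64_t *Cx, const uint64_t *Ax, uint64_t y, int64_t n)
{
  for (int64_t p = 0; p < n; p++) Cx[p] = f(Ax[p], y);
}

int main(void)
{
  uint64_t a[3] = {1, 2, 4}, c[3];
  bind2nd(c, a, 8, 3); /* c = {9, 10, 12} */
  for (int i = 0; i < 3; i++) printf("%llu ", (unsigned long long)c[i]);
  printf("\n");
  bind1st(c, 8, a, 3); /* same result here, since the stand-in op commutes */
  return 0;
}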
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; struct OMPTraitProperty; struct OMPTraitSelector; struct OMPTraitSet; class OMPTraitInfo; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. 
These are only
  /// allowed in particular circumstances.
  // __except block
  IdentifierInfo *Ident__exception_code, *Ident___exception_code,
                 *Ident_GetExceptionCode;
  // __except filter expression
  IdentifierInfo *Ident__exception_info, *Ident___exception_info,
                 *Ident_GetExceptionInfo;
  // __finally
  IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination,
                 *Ident_AbnormalTermination;

  /// Contextual keywords for Microsoft extensions.
  IdentifierInfo *Ident__except;
  mutable IdentifierInfo *Ident_sealed;
  mutable IdentifierInfo *Ident_abstract;

  /// Ident_super - IdentifierInfo for "super", to support fast
  /// comparison.
  IdentifierInfo *Ident_super;
  /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for
  /// "vector" and "bool", for fast comparison. Only present if AltiVec or
  /// ZVector are enabled.
  IdentifierInfo *Ident_vector;
  IdentifierInfo *Ident_bool;
  IdentifierInfo *Ident_Bool;
  /// Ident_pixel - cached IdentifierInfo for "pixel", for fast comparison.
  /// Only present if AltiVec is enabled.
  IdentifierInfo *Ident_pixel;

  /// Objective-C contextual keywords.
  IdentifierInfo *Ident_instancetype;

  /// Identifier for "introduced".
  IdentifierInfo *Ident_introduced;
  /// Identifier for "deprecated".
  IdentifierInfo *Ident_deprecated;
  /// Identifier for "obsoleted".
  IdentifierInfo *Ident_obsoleted;
  /// Identifier for "unavailable".
  IdentifierInfo *Ident_unavailable;
  /// Identifier for "message".
  IdentifierInfo *Ident_message;
  /// Identifier for "strict".
  IdentifierInfo *Ident_strict;
  /// Identifier for "replacement".
  IdentifierInfo *Ident_replacement;

  /// Identifiers used by the 'external_source_symbol' attribute.
  IdentifierInfo *Ident_language, *Ident_defined_in,
      *Ident_generated_declaration;

  /// C++11 contextual keywords.
  mutable IdentifierInfo *Ident_final;
  mutable IdentifierInfo *Ident_GNU_final;
  mutable IdentifierInfo *Ident_override;

  // C++2a contextual keywords.
  mutable IdentifierInfo *Ident_import;
  mutable IdentifierInfo *Ident_module;

  // C++ type trait keywords that can be reverted to identifiers and still be
  // used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;

  std::unique_ptr<PragmaHandler> AlignHandler;
  std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
  std::unique_ptr<PragmaHandler> OptionsHandler;
  std::unique_ptr<PragmaHandler> PackHandler;
  std::unique_ptr<PragmaHandler> MSStructHandler;
  std::unique_ptr<PragmaHandler> UnusedHandler;
  std::unique_ptr<PragmaHandler> WeakHandler;
  std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
  std::unique_ptr<PragmaHandler> FPContractHandler;
  std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
  std::unique_ptr<PragmaHandler> OpenMPHandler;
  std::unique_ptr<PragmaHandler> PCSectionHandler;
  std::unique_ptr<PragmaHandler> MSCommentHandler;
  std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
  std::unique_ptr<PragmaHandler> FPEvalMethodHandler;
  std::unique_ptr<PragmaHandler> FloatControlHandler;
  std::unique_ptr<PragmaHandler> MSPointersToMembers;
  std::unique_ptr<PragmaHandler> MSVtorDisp;
  std::unique_ptr<PragmaHandler> MSInitSeg;
  std::unique_ptr<PragmaHandler> MSDataSeg;
  std::unique_ptr<PragmaHandler> MSBSSSeg;
  std::unique_ptr<PragmaHandler> MSConstSeg;
  std::unique_ptr<PragmaHandler> MSCodeSeg;
  std::unique_ptr<PragmaHandler> MSSection;
  std::unique_ptr<PragmaHandler> MSRuntimeChecks;
  std::unique_ptr<PragmaHandler> MSIntrinsic;
  std::unique_ptr<PragmaHandler> MSOptimize;
  std::unique_ptr<PragmaHandler> MSFenvAccess;
  std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
  std::unique_ptr<PragmaHandler> OptimizeHandler;
  std::unique_ptr<PragmaHandler> LoopHintHandler;
  std::unique_ptr<PragmaHandler> UnrollHintHandler;
  std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
  std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
  std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
  std::unique_ptr<PragmaHandler> FPHandler;
  std::unique_ptr<PragmaHandler> STDCFenvAccessHandler;
  std::unique_ptr<PragmaHandler> STDCFenvRoundHandler;
  std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
  std::unique_ptr<PragmaHandler> STDCUnknownHandler;
  std::unique_ptr<PragmaHandler> AttributePragmaHandler;
  std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
  std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;

  std::unique_ptr<CommentHandler> CommentSemaHandler;

  /// Whether the '>' token acts as an operator or not. This will be
  /// true except when we are parsing an expression within a C++
  /// template argument list, where the '>' closes the template
  /// argument list.
  bool GreaterThanIsOperator;

  /// ColonIsSacred - When this is false, we aggressively try to recover from
  /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
  /// safe in case statements and a few other things. This is managed by the
  /// ColonProtectionRAIIObject RAII object.
  bool ColonIsSacred;

  /// Parsing OpenMP directive mode.
  bool OpenMPDirectiveParsing = false;

  /// When true, we are directly inside an Objective-C message
  /// send expression.
  ///
  /// This is managed by the \c InMessageExpressionRAIIObject class, and
  /// should not be set directly.
  bool InMessageExpression;

  /// Set to true after calling ProduceSignatureHelp; it is a workaround to
  /// make sure ProduceSignatureHelp is only called at the deepest function
  /// call.
  bool CalledSignatureHelp = false;

  /// The "depth" of the template parameters currently being parsed.
  unsigned TemplateParameterDepth;

  /// The current kind of OpenMP clause.
  OpenMPClauseKind OMPClauseKind = llvm::omp::OMPC_unknown;

  /// RAII class that manages the template parameter depth.
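  /// Usage sketch (illustrative, not normative documentation):
  ///
  ///   TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
  ///   ++CurTemplateDepthTracker;  // one extra level for a nested list
  ///
  /// The added levels are subtracted from the depth again when the tracker
  /// goes out of scope.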
class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; void MaybeDestroyTemplateIds() { if (!TemplateIds.empty() && (Tok.is(tok::eof) || !PP.mightHavePendingAnnotationTokens())) DestroyTemplateIds(); } void DestroyTemplateIds(); /// RAII object to destroy TemplateIdAnnotations where possible, from a /// likely-good position during parsing. struct DestroyTemplateIdAnnotationsRAIIObj { Parser &Self; DestroyTemplateIdAnnotationsRAIIObj(Parser &Self) : Self(Self) {} ~DestroyTemplateIdAnnotationsRAIIObj() { Self.MaybeDestroyTemplateIds(); } }; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. /// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. 
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
             Priority Prio) {
      if (!Locs.empty() && Locs.back().isActive(P)) {
        if (Locs.back().Priority <= Prio) {
          Locs.back().TemplateName = TemplateName;
          Locs.back().LessLoc = LessLoc;
          Locs.back().Priority = Prio;
        }
      } else {
        Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount,
                        P.BracketCount, P.BraceCount});
      }
    }

    /// Mark the current potential missing template location as having been
    /// handled (this happens if we pass a "corresponding" '>' or '>>' token
    /// or leave a bracket scope).
    void clear(Parser &P) {
      while (!Locs.empty() && Locs.back().isActiveOrNested(P))
        Locs.pop_back();
    }

    /// Get the current enclosing expression that might have been intended to
    /// be a template name.
    Loc *getCurrent(Parser &P) {
      if (!Locs.empty() && Locs.back().isActive(P))
        return &Locs.back();
      return nullptr;
    }
  };

  AngleBracketTracker AngleBrackets;

  IdentifierInfo *getSEHExceptKeyword();

  /// True if we are within an Objective-C container while parsing C-like decls.
  ///
  /// This is necessary because Sema thinks we have left the container
  /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
  /// be NULL.
  bool ParsingInObjCContainer;

  /// Whether to skip parsing of function bodies.
  ///
  /// This option can be used, for example, to speed up searches for
  /// declarations/definitions when indexing.
  bool SkipFunctionBodies;

  /// The location of the expression statement that is being parsed right now.
  /// Used to determine if an expression that is being parsed is a statement or
  /// just a regular sub-expression.
  SourceLocation ExprStatementTokLoc;

  /// Flags describing a context in which we're parsing a statement.
  enum class ParsedStmtContext {
    /// This context permits declarations in language modes where declarations
    /// are not statements.
    AllowDeclarationsInC = 0x1,
    /// This context permits standalone OpenMP directives.
    AllowStandaloneOpenMPDirectives = 0x2,
    /// This context is at the top level of a GNU statement expression.
    InStmtExpr = 0x4,

    /// The context of a regular substatement.
    SubStmt = 0,
    /// The context of a compound-statement.
    Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,

    LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
  };

  /// Act on an expression statement that might be the last statement in a
  /// GNU statement expression. Checks whether we are actually at the end of
  /// a statement expression and builds a suitable expression statement.
  StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);

public:
  Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
  ~Parser() override;

  const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
  const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
  Preprocessor &getPreprocessor() const { return PP; }
  Sema &getActions() const { return Actions; }
  AttributeFactory &getAttrFactory() { return AttrFactory; }

  const Token &getCurToken() const { return Tok; }
  Scope *getCurScope() const { return Actions.getCurScope(); }
  void incrementMSManglingNumber() const {
    return Actions.incrementMSManglingNumber();
  }

  Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

  // Type forwarding. All of these are statically 'void*', but they may all be
  // different actual classes based on the actions in place.
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;

  typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

  typedef Sema::FullExprArg FullExprArg;

  // Parsing methods.
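  // A minimal driver sketch (illustrative; loosely modeled on clang's
  // ParseAST, not normative documentation of it):
  //
  //   Parser P(PP, Actions, /*SkipFunctionBodies=*/false);
  //   P.Initialize();
  //   Parser::DeclGroupPtrTy ADecl;
  //   Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
  //   for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl, IS); !AtEOF;
  //        AtEOF = P.ParseTopLevelDecl(ADecl, IS))
  //     ;  // hand each ADecl group to the AST consumer here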
/// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, Sema::ModuleImportState &ImportState); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module; return ParseTopLevelDecl(Result, IS); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. 
/// For typos, give a fixit to '='.
  bool isTokenEqualOrEqualTypo();

  /// Return the current token to the token stream and make the given
  /// token the current token.
  void UnconsumeToken(Token &Consumed) {
    Token Next = Tok;
    PP.EnterToken(Consumed, /*IsReinject*/true);
    PP.Lex(Tok);
    PP.EnterToken(Next, /*IsReinject*/true);
  }

  SourceLocation ConsumeAnnotationToken() {
    assert(Tok.isAnnotation() && "wrong consume method");
    SourceLocation Loc = Tok.getLocation();
    PrevTokLocation = Tok.getAnnotationEndLoc();
    PP.Lex(Tok);
    return Loc;
  }

  /// ConsumeParen - This consume method keeps the paren count up-to-date.
  ///
  SourceLocation ConsumeParen() {
    assert(isTokenParen() && "wrong consume method");
    if (Tok.getKind() == tok::l_paren)
      ++ParenCount;
    else if (ParenCount) {
      AngleBrackets.clear(*this);
      --ParenCount;       // Don't let unbalanced )'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
  ///
  SourceLocation ConsumeBracket() {
    assert(isTokenBracket() && "wrong consume method");
    if (Tok.getKind() == tok::l_square)
      ++BracketCount;
    else if (BracketCount) {
      AngleBrackets.clear(*this);
      --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeBrace - This consume method keeps the brace count up-to-date.
  ///
  SourceLocation ConsumeBrace() {
    assert(isTokenBrace() && "wrong consume method");
    if (Tok.getKind() == tok::l_brace)
      ++BraceCount;
    else if (BraceCount) {
      AngleBrackets.clear(*this);
      --BraceCount;       // Don't let unbalanced }'s drive the count negative.
    }
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
  /// and returning the location of the consumed token. This method is
  /// specific to strings, as it handles string literal concatenation, as per
  /// C99 5.1.1.2, translation phase #6.
  SourceLocation ConsumeStringToken() {
    assert(isTokenStringLiteral() &&
           "Should only consume string literals with this method");
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// Consume the current code-completion token.
  ///
  /// This routine can be called to consume the code-completion token and
  /// continue processing in special cases where \c cutOffParsing() isn't
  /// desired, such as token caching or completion with lookahead.
  SourceLocation ConsumeCodeCompletionToken() {
    assert(Tok.is(tok::code_completion));
    PrevTokLocation = Tok.getLocation();
    PP.Lex(Tok);
    return PrevTokLocation;
  }

  /// \brief When we are consuming a code-completion token without having
  /// matched a specific position in the grammar, provide code-completion
  /// results based on context.
  ///
  /// \returns the source location of the code-completion token.
  SourceLocation handleUnexpectedCodeCompletionToken();

  /// Abruptly cut off parsing; mainly used when we have reached the
  /// code-completion point.
  void cutOffParsing() {
    if (PP.isCodeCompletionEnabled())
      PP.setCodeCompletionReached();
    // Cut off parsing by acting as if we reached the end-of-file.
    Tok.setKind(tok::eof);
  }

  /// Determine if we're at the end of the file or at a transition
  /// between modules.
  bool isEofOrEom() {
    tok::TokenKind Kind = Tok.getKind();
    return Kind == tok::eof || Kind == tok::annot_module_begin ||
           Kind == tok::annot_module_end || Kind == tok::annot_module_include;
  }

  /// Checks if the \p Level is valid for use in a fold expression.
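  /// For example (an illustrative sketch, not normative documentation): when
  /// disambiguating `(args + ...)`, the parser can check
  /// `isFoldOperator(tok::plus)` before committing to a fold-expression
  /// parse.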
bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// Handle the annotation token produced for /// #pragma STDC FENV_ROUND... void HandlePragmaFEnvRound(); /// Handle the annotation token produced for /// #pragma float_control void HandlePragmaFloatControl(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static TypeResult getTypeAnnotation(const Token &Tok) {
    if (!Tok.getAnnotationValue())
      return TypeError();
    return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
  }

private:
  static void setTypeAnnotation(Token &Tok, TypeResult T) {
    assert((T.isInvalid() || T.get()) &&
           "produced a valid-but-null type annotation?");
    Tok.setAnnotationValue(T.isInvalid() ? nullptr
                                         : T.get().getAsOpaquePtr());
  }

  static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
    return static_cast<NamedDecl*>(Tok.getAnnotationValue());
  }

  static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
    Tok.setAnnotationValue(ND);
  }

  static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
    return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
  }

  static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
    Tok.setAnnotationValue(ND);
  }

  /// Read an already-translated primary expression out of an annotation
  /// token.
  static ExprResult getExprAnnotation(const Token &Tok) {
    return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
  }

  /// Set the primary expression corresponding to the given annotation
  /// token.
  static void setExprAnnotation(Token &Tok, ExprResult ER) {
    Tok.setAnnotationValue(ER.getAsOpaquePointer());
  }

public:
  // TryAnnotateTypeOrScopeToken will try hard to find a type name, attempting
  // typo correction if necessary.
  bool TryAnnotateTypeOrScopeToken();
  bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                                 bool IsNewScope);
  bool TryAnnotateCXXScopeToken(bool EnteringContext = false);

  bool MightBeCXXScopeToken() {
    return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
           (Tok.is(tok::annot_template_id) &&
            NextToken().is(tok::coloncolon)) ||
           Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
  }
  bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
    return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
  }

private:
  enum AnnotatedNameKind {
    /// Annotation has failed and emitted an error.
    ANK_Error,
    /// The identifier is a tentatively-declared name.
    ANK_TentativeDecl,
    /// The identifier is a template name. FIXME: Add an annotation for that.
    ANK_TemplateName,
    /// The identifier can't be resolved.
    ANK_Unresolved,
    /// Annotation was successful.
    ANK_Success
  };

  AnnotatedNameKind
  TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

  /// Push a tok::annot_cxxscope token onto the token stream.
  void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);

  /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
  /// replacing them with the non-context-sensitive keywords. This returns
  /// true if the token was replaced.
  bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                       const char *&PrevSpec, unsigned &DiagID,
                       bool &isInvalid) {
    if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
      return false;

    if (Tok.getIdentifierInfo() != Ident_vector &&
        Tok.getIdentifierInfo() != Ident_bool &&
        Tok.getIdentifierInfo() != Ident_Bool &&
        (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
      return false;

    return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
  }

  /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
  /// identifier token, replacing it with the non-context-sensitive __vector.
  /// This returns true if the token was replaced.
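  /// For instance (illustrative): in AltiVec mode, the contextual `vector` in
  /// `vector int v;` is rewritten to the `__vector` keyword so that later
  /// parsing sees a non-context-sensitive token.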
bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser &p) : P(p), PrevPreferredType(P.PreferredType) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
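  /// A hedged usage sketch: constructing `ObjCDeclContextSwitch OCDS(*this);`
  /// around the parsing of C-like declarations temporarily exits the
  /// Objective-C container context and re-enters it when OCDS is destroyed.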
class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. /// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); /// Kinds of compound pseudo-tokens formed by a sequence of two real tokens. enum class CompoundToken { /// A '(' '{' beginning a statement-expression. StmtExprBegin, /// A '}' ')' ending a statement-expression. StmtExprEnd, /// A '[' '[' beginning a C++11 or C2x attribute. AttrBegin, /// A ']' ']' ending a C++11 or C2x attribute. AttrEnd, /// A '::' '*' forming a C++ pointer-to-member declaration. MemberPtr, }; /// Check that a compound operator was written in a "sensible" way, and warn /// if not. void checkCompoundToken(SourceLocation FirstTokLoc, tok::TokenKind FirstTokKind, CompoundToken Op); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. 
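    // Illustrative sketch: `ParseScope BodyScope(this, Scope::DeclScope);`
    // enters a new declaration scope that is exited automatically when
    // BodyScope is destroyed (or earlier, via an explicit Exit() call).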
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// Introduces zero or more scopes for parsing. The scopes will all be exited /// when the object is destroyed. class MultiParseScope { Parser &Self; unsigned NumScopes = 0; MultiParseScope(const MultiParseScope&) = delete; public: MultiParseScope(Parser &Self) : Self(Self) {} void Enter(unsigned ScopeFlags) { Self.EnterScope(ScopeFlags); ++NumScopes; } void Exit() { while (NumScopes) { Self.ExitScope(); --NumScopes; } } ~MultiParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); /// Re-enter the template scopes for a declaration that might be a template. unsigned ReenterTemplateScopes(MultiParseScope &S, Decl *D); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. Balances (), [], and {} delimiter tokens while /// skipping. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. 
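  /// An illustrative recovery sketch: on a malformed argument list, calling
  /// `SkipUntil(tok::r_paren, StopAtSemi | StopBeforeMatch)` advances to the
  /// next balanced ')' without consuming it, giving up early if a ';' is
  /// seen first.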
bool SkipUntil(tok::TokenKind T,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    return SkipUntil(llvm::makeArrayRef(T), Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
    tok::TokenKind TokArray[] = {T1, T2, T3};
    return SkipUntil(TokArray, Flags);
  }
  bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
                 SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

  /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
  /// point for skipping past a simple-declaration.
  void SkipMalformedDecl();

  /// The location of the first statement inside an else that might
  /// have a misleading indentation. If there is no
  /// MisleadingIndentationChecker active on an else, this location is
  /// invalid.
  SourceLocation MisleadingIndentationElseLoc;

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
    virtual void ParseLexedPragmas();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
  class LateParsedClass : public LateParsedDeclaration {
  public:
    LateParsedClass(Parser *P, ParsingClass *C);
    ~LateParsedClass() override;

    void ParseLexedMethodDeclarations() override;
    void ParseLexedMemberInitializers() override;
    void ParseLexedMethodDefs() override;
    void ParseLexedAttributes() override;
    void ParseLexedPragmas() override;

  private:
    Parser *Self;
    ParsingClass *Class;
  };

  /// Contains the lexed tokens of an attribute with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
  /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
  /// LateParsedTokens.
  struct LateParsedAttribute : public LateParsedDeclaration {
    Parser *Self;
    CachedTokens Toks;
    IdentifierInfo &AttrName;
    IdentifierInfo *MacroII = nullptr;
    SourceLocation AttrNameLoc;
    SmallVector<Decl*, 2> Decls;

    explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                                 SourceLocation Loc)
        : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

    void ParseLexedAttributes() override;

    void addDecl(Decl *D) { Decls.push_back(D); }
  };

  /// Contains the lexed tokens of a pragma with arguments that
  /// may reference member variables and so need to be parsed at the
  /// end of the class declaration after parsing all other member
  /// declarations.
class LateParsedPragma : public LateParsedDeclaration {
    Parser *Self = nullptr;
    AccessSpecifier AS = AS_none;
    CachedTokens Toks;

  public:
    explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
        : Self(P), AS(AS) {}

    void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
    const CachedTokens &toks() const { return Toks; }
    AccessSpecifier getAccessSpecifier() const { return AS; }

    void ParseLexedPragmas() override;
  };

  // A list of late-parsed attributes. Used by ParseGNUAttributes.
  class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
  public:
    LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }

    bool parseSoon() { return ParseSoon; }

  private:
    bool ParseSoon;  // Are we planning to parse these shortly after creation?
  };

  /// Contains the lexed tokens of a member function definition
  /// which needs to be parsed at the end of the class declaration
  /// after parsing all other member declarations.
  struct LexedMethod : public LateParsedDeclaration {
    Parser *Self;
    Decl *D;
    CachedTokens Toks;

    explicit LexedMethod(Parser *P, Decl *MD) : Self(P), D(MD) {}

    void ParseLexedMethodDefs() override;
  };

  /// LateParsedDefaultArgument - Keeps track of a parameter that may
  /// have a default argument that cannot be parsed yet because it
  /// occurs within a member function declaration inside the class
  /// (C++ [class.mem]p2).
  struct LateParsedDefaultArgument {
    explicit LateParsedDefaultArgument(
        Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
        : Param(P), Toks(std::move(Toks)) { }

    /// Param - The parameter declaration for this parameter.
    Decl *Param;

    /// Toks - The sequence of tokens that comprises the default
    /// argument expression, not including the '=' or the terminating
    /// ')' or ','. This will be NULL for parameters that have no
    /// default argument.
    std::unique_ptr<CachedTokens> Toks;
  };

  /// LateParsedMethodDeclaration - A method declaration inside a class that
  /// contains at least one entity whose parsing needs to be delayed
  /// until the class itself is completely-defined, such as a default
  /// argument (C++ [class.mem]p2).
  struct LateParsedMethodDeclaration : public LateParsedDeclaration {
    explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
        : Self(P), Method(M), ExceptionSpecTokens(nullptr) {}

    void ParseLexedMethodDeclarations() override;

    Parser *Self;

    /// Method - The method declaration.
    Decl *Method;

    /// DefaultArgs - Contains the parameters of the function and
    /// their default arguments. At least one of the parameters will
    /// have a default argument, but all of the parameters of the
    /// method will be stored so that they can be reintroduced into
    /// scope at the appropriate times.
    SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

    /// The set of tokens that make up an exception-specification that
    /// has not yet been parsed.
    CachedTokens *ExceptionSpecTokens;
  };

  /// LateParsedMemberInitializer - An initializer for a non-static class data
  /// member whose parsing must be delayed until the class is completely
  /// defined (C++11 [class.mem]p2).
  struct LateParsedMemberInitializer : public LateParsedDeclaration {
    LateParsedMemberInitializer(Parser *P, Decl *FD)
        : Self(P), Field(FD) { }

    void ParseLexedMemberInitializers() override;

    Parser *Self;

    /// Field - The field declaration.
    Decl *Field;

    /// CachedTokens - The sequence of tokens that comprises the initializer,
    /// including any leading '='.
CachedTokens Toks;
  };

  /// LateParsedDeclarationsContainer - During parsing of a top-level
  /// (non-nested) C++ class, the method declarations that contain parts which
  /// won't be parsed until after the definition is completed
  /// (C++ [class.mem]p2), together with any attached inline definitions, are
  /// stored here with the tokens that will be parsed to create those
  /// entities.
  typedef SmallVector<LateParsedDeclaration*,2>
    LateParsedDeclarationsContainer;

  /// Representation of a class that has been parsed, including
  /// any member function declarations or definitions that need to be
  /// parsed after the corresponding top-level class is complete.
  struct ParsingClass {
    ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
        : TopLevelClass(TopLevelClass), IsInterface(IsInterface),
          TagOrTemplate(TagOrTemplate) {}

    /// Whether this is a "top-level" class, meaning that it is
    /// not nested within another class.
    bool TopLevelClass : 1;

    /// Whether this class is an __interface.
    bool IsInterface : 1;

    /// The class or class template whose definition we are parsing.
    Decl *TagOrTemplate;

    /// LateParsedDeclarations - Method declarations, inline definitions and
    /// nested classes that contain pieces whose parsing will be delayed until
    /// the top-level class is fully defined.
    LateParsedDeclarationsContainer LateParsedDeclarations;
  };

  /// The stack of classes that is currently being
  /// parsed. Nested and local classes will be pushed onto this stack
  /// when they are parsed, and removed afterward.
  std::stack<ParsingClass *> ClassStack;

  ParsingClass &getCurrentClass() {
    assert(!ClassStack.empty() && "No lexed method stacks!");
    return *ClassStack.top();
  }

  /// RAII object used to manage the parsing of a class definition.
  class ParsingClassDefinition {
    Parser &P;
    bool Popped;
    Sema::ParsingClassState State;

  public:
    ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                           bool IsInterface)
        : P(P), Popped(false),
          State(P.PushParsingClass(TagOrTemplate, TopLevelClass,
                                   IsInterface)) {
    }

    /// Pop this class off the stack.
    void Pop() {
      assert(!Popped && "Nested class has already been popped");
      Popped = true;
      P.PopParsingClass(State);
    }

    ~ParsingClassDefinition() {
      if (!Popped)
        P.PopParsingClass(State);
    }
  };

  /// Contains any template-specific information that has been parsed
  /// prior to parsing declaration specifiers.
  struct ParsedTemplateInfo {
    ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr) {}

    ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                       bool isSpecialization,
                       bool lastParameterListWasEmpty = false)
        : Kind(isSpecialization? ExplicitSpecialization : Template),
          TemplateParams(TemplateParams),
          LastParameterListWasEmpty(lastParameterListWasEmpty) { }

    explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                                SourceLocation TemplateLoc)
        : Kind(ExplicitInstantiation), TemplateParams(nullptr),
          ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
          LastParameterListWasEmpty(false) { }

    /// The kind of template we are parsing.
    enum {
      /// We are not parsing a template at all.
      NonTemplate = 0,
      /// We are parsing a template declaration.
      Template,
      /// We are parsing an explicit specialization.
      ExplicitSpecialization,
      /// We are parsing an explicit instantiation.
      ExplicitInstantiation
    } Kind;

    /// The template parameter lists, for template declarations
    /// and explicit specializations.
TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; // In ParseCXXInlineMethods.cpp. struct ReenterTemplateScopeRAII; struct ReenterClassScopeRAII; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, const ParsedAttributesView &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); void ParseLexedPragmas(ParsingClass &Class); void ParseLexedPragma(LateParsedPragma &LP); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributes &Attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsedAttributes &Attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributes &Attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc is filled with the location of the last token of the simple-asm. 
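  // For example (illustrative): for the GNU asm label in
  // `int x asm("x_reg");`, ParseSimpleAsm parses `asm("x_reg")` and EndLoc
  // receives the location of the closing ')'.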
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc); ExprResult ParseAsmStringLiteral(bool ForAsmLabel); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. 
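  /// For example (illustrative): given `NSArray<NSString *> *`, once the base
  /// type `NSArray` has been parsed, this extends it with the type argument
  /// `NSString *`; protocol qualifiers such as `<NSCopying>` are handled the
  /// same way.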
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
                                                    ParsedType type,
                                                    bool consumeLastToken,
                                                    SourceLocation &endLoc);

  void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
                                  Decl *CDecl);
  DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
                                                ParsedAttributes &prefixAttrs);

  struct ObjCImplParsingDataRAII {
    Parser &P;
    Decl *Dcl;
    bool HasCFunction;
    typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
    LateParsedObjCMethodContainer LateParsedObjCMethods;

    ObjCImplParsingDataRAII(Parser &parser, Decl *D)
        : P(parser), Dcl(D), HasCFunction(false) {
      P.CurParsedObjCImpl = this;
      Finished = false;
    }
    ~ObjCImplParsingDataRAII();

    void finish(SourceRange AtEnd);
    bool isFinished() const { return Finished; }

  private:
    bool Finished;
  };
  ObjCImplParsingDataRAII *CurParsedObjCImpl;
  void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);

  DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
                                                      ParsedAttributes &Attrs);
  DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
  Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
  Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
  Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);

  IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);

  // Definitions for Objective-C context-sensitive keyword recognition.
  enum ObjCTypeQual {
    objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
    objc_nonnull, objc_nullable, objc_null_unspecified,
    objc_NumQuals
  };
  IdentifierInfo *ObjCTypeQuals[objc_NumQuals];

  bool isTokIdentifier_in() const;

  ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
                               ParsedAttributes *ParamAttrs);
  Decl *ParseObjCMethodPrototype(
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
      tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
      bool MethodDefinition = true);
  void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);

  Decl *ParseObjCMethodDefinition();

public:
  //===--------------------------------------------------------------------===//
  // C99 6.5: Expressions.

  /// TypeCastState - State whether an expression is or may be a type cast.
  enum TypeCastState {
    NotTypeCast = 0,
    MaybeTypeCast,
    IsTypeCast
  };

  ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpressionInExprEvalContext(
      TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseCaseExpression(SourceLocation CaseLoc);
  ExprResult ParseConstraintExpression();
  ExprResult
  ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
  ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
  // Expr that doesn't include commas.
  ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
  ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
                                  unsigned &NumLineToksConsumed,
                                  bool IsUnevaluated);
  ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);

private:
  ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);

  ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);

  ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec);

  /// Control what ParseCastExpression will parse.
enum CastParseKind { AnyCastExpr = 0, UnaryExprOnly, PrimaryExprOnly }; ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); ExprResult ParseCastExpression(CastParseKind ParseKind, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false, bool *NotPrimaryExpression = nullptr); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseSYCLUniqueStableNameExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>(), bool FailImmediatelyOnInvalidExpr = false, bool EarlyTypoCorrection = false); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
    CastExpr         // Also allow '(' type-name ')' <anything>
  };
  ExprResult ParseParenExpression(ParenParseOption &ExprType,
                                  bool stopIfCastExpr, bool isTypeCast,
                                  ParsedType &CastTy,
                                  SourceLocation &RParenLoc);

  ExprResult ParseCXXAmbiguousParenExpression(
      ParenParseOption &ExprType, ParsedType &CastTy,
      BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
  ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
                                            SourceLocation LParenLoc,
                                            SourceLocation RParenLoc);

  ExprResult ParseGenericSelectionExpression();

  ExprResult ParseObjCBoolLiteral();

  ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);

  //===--------------------------------------------------------------------===//
  // C++ Expressions
  ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
                                     Token &Replacement);
  ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);

  bool areTokensAdjacent(const Token &A, const Token &B);

  void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
                                  bool EnteringContext, IdentifierInfo &II,
                                  CXXScopeSpec &SS);

  bool ParseOptionalCXXScopeSpecifier(
      CXXScopeSpec &SS, ParsedType ObjectType, bool ObjectHasErrors,
      bool EnteringContext, bool *MayBePseudoDestructor = nullptr,
      bool IsTypename = false, IdentifierInfo **LastII = nullptr,
      bool OnlyNamespace = false, bool InUsingDeclaration = false);

  //===--------------------------------------------------------------------===//
  // C++11 5.1.2: Lambda expressions

  /// Result of tentatively parsing a lambda-introducer.
  enum class LambdaIntroducerTentativeParse {
    /// This appears to be a lambda-introducer, which has been fully parsed.
    Success,
    /// This is a lambda-introducer, but has not been fully parsed, and this
    /// function needs to be called again to parse it.
    Incomplete,
    /// This is definitely an Objective-C message send expression, rather than
    /// a lambda-introducer, attribute-specifier, or array designator.
    MessageSend,
    /// This is not a lambda-introducer.
    Invalid,
  };

  // [...] () -> type {...}
  ExprResult ParseLambdaExpression();
  ExprResult TryParseLambdaExpression();
  bool ParseLambdaIntroducer(
      LambdaIntroducer &Intro,
      LambdaIntroducerTentativeParse *Tentative = nullptr);
  ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
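  // For example, '[&x](int a) { return a + x; }' yields Success once the
  // introducer '[&x]' has been consumed, while in Objective-C++ '[foo bar]'
  // disambiguates as MessageSend and '[[' usually begins an
  // attribute-specifier.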
  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Casts
  ExprResult ParseCXXCasts();

  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
  ExprResult ParseBuiltinBitCast();

  //===--------------------------------------------------------------------===//
  // C++ 5.2p1: C++ Type Identification
  ExprResult ParseCXXTypeid();

  //===--------------------------------------------------------------------===//
  // C++ : Microsoft __uuidof Expression
  ExprResult ParseCXXUuidof();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.4: C++ Pseudo-Destructor Expressions
  ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
                                      tok::TokenKind OpKind, CXXScopeSpec &SS,
                                      ParsedType ObjectType);

  //===--------------------------------------------------------------------===//
  // C++ 9.3.2: C++ 'this' pointer
  ExprResult ParseCXXThis();

  //===--------------------------------------------------------------------===//
  // C++ 15: C++ Throw Expression
  ExprResult ParseThrowExpression();

  ExceptionSpecificationType tryParseExceptionSpecification(
      bool Delayed, SourceRange &SpecificationRange,
      SmallVectorImpl<ParsedType> &DynamicExceptions,
      SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
      ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens);

  // EndLoc is filled with the location of the last token of the specification.
  ExceptionSpecificationType ParseDynamicExceptionSpecification(
      SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions,
      SmallVectorImpl<SourceRange> &Ranges);

  //===--------------------------------------------------------------------===//
  // C++0x 8: Function declaration trailing-return-type
  TypeResult ParseTrailingReturnType(SourceRange &Range,
                                     bool MayBeFollowedByDirectInit);

  //===--------------------------------------------------------------------===//
  // C++ 2.13.5: C++ Boolean Literals
  ExprResult ParseCXXBoolLiteral();

  //===--------------------------------------------------------------------===//
  // C++ 5.2.3: Explicit type conversion (functional notation)
  ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);

  /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
  /// This should only be called when the current token is known to be part of
  /// simple-type-specifier.
  void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);

  bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);

  //===--------------------------------------------------------------------===//
  // C++ 5.3.4 and 5.3.5: C++ new and delete
  bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr *> &Exprs,
                                   Declarator &D);
  void ParseDirectNewDeclarator(Declarator &D);
  ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
  ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start);

  //===--------------------------------------------------------------------===//
  // C++ if/switch/while/for condition expression.
  struct ForRangeInfo;
  Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
                                          SourceLocation Loc,
                                          Sema::ConditionKind CK,
                                          bool MissingOK,
                                          ForRangeInfo *FRI = nullptr,
                                          bool EnterForConditionScope = false);
  DeclGroupPtrTy ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
                                                      ParsedAttributes &Attrs);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines
  ExprResult ParseCoyieldExpression();

  //===--------------------------------------------------------------------===//
  // C++ Concepts
  ExprResult ParseRequiresExpression();
  void ParseTrailingRequiresClause(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C99 6.7.8: Initialization.
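  // An initializer is either an assignment-expression or a braced list,
  // e.g. 'int x = f();' versus 'int a[] = { 1, 2 };', optionally with C99
  // designators such as '{ .field = 3, [2] = 4 }'.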
  /// ParseInitializer
  ///       initializer: [C99 6.7.8]
  ///         assignment-expression
  ///         '{' ...
  ExprResult ParseInitializer() {
    if (Tok.isNot(tok::l_brace))
      return ParseAssignmentExpression();
    return ParseBraceInitializer();
  }
  bool MayBeDesignationStart();
  ExprResult ParseBraceInitializer();
  struct DesignatorCompletionInfo {
    SmallVectorImpl<Expr *> &InitExprs;
    QualType PreferredBaseType;
  };
  ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);

  //===--------------------------------------------------------------------===//
  // clang Expressions

  ExprResult ParseBlockLiteralExpression(); // ^{...}

  //===--------------------------------------------------------------------===//
  // Objective-C Expressions
  ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
  ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
  ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
  ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
  ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
  ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
  ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
  bool isSimpleObjCMessageExpression();
  ExprResult ParseObjCMessageExpression();
  ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                            SourceLocation SuperLoc,
                                            ParsedType ReceiverType,
                                            Expr *ReceiverExpr);
  ExprResult ParseAssignmentExprWithObjCMessageExprStart(
      SourceLocation LBracloc, SourceLocation SuperLoc,
      ParsedType ReceiverType, Expr *ReceiverExpr);
  bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
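  // In '[receiver message:arg]' the receiver may itself be an expression or
  // a type name (as in '[NSString alloc]'); ParseObjCXXMessageReceiver
  // reports which form was seen via IsExpr.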
  //===--------------------------------------------------------------------===//
  // C99 6.8: Statements and Blocks.

  /// A SmallVector of statements, with stack size 32 (as that is the only one
  /// used).
  typedef SmallVector<Stmt *, 32> StmtVector;
  /// A SmallVector of expressions, with stack size 12 (the maximum used).
  typedef SmallVector<Expr *, 12> ExprVector;
  /// A SmallVector of types.
  typedef SmallVector<ParsedType, 12> TypeVector;

  StmtResult
  ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                 ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
  StmtResult ParseStatementOrDeclaration(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc = nullptr);
  StmtResult ParseStatementOrDeclarationAfterAttributes(
      StmtVector &Stmts, ParsedStmtContext StmtCtx,
      SourceLocation *TrailingElseLoc, ParsedAttributes &Attrs);
  StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseLabeledStatement(ParsedAttributes &Attrs,
                                   ParsedStmtContext StmtCtx);
  StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                                bool MissingCase = false,
                                ExprResult Expr = ExprResult());
  StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
  StmtResult ParseCompoundStatement(bool isStmtExpr = false);
  StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
  void ParseCompoundStatementLeadingPragmas();
  bool ConsumeNullStmt(StmtVector &Stmts);
  StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
  bool ParseParenExprOrCondition(StmtResult *InitStmt,
                                 Sema::ConditionResult &CondResult,
                                 SourceLocation Loc, Sema::ConditionKind CK,
                                 bool MissingOK, SourceLocation *LParenLoc,
                                 SourceLocation *RParenLoc);
  StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseDoStatement();
  StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
  StmtResult ParseGotoStatement();
  StmtResult ParseContinueStatement();
  StmtResult ParseBreakStatement();
  StmtResult ParseReturnStatement();
  StmtResult ParseAsmStatement(bool &msAsm);
  StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
  StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                                 SourceLocation *TrailingElseLoc,
                                 ParsedAttributes &Attrs);

  /// Describes the behavior that should be taken for an __if_exists
  /// block.
  enum IfExistsBehavior {
    /// Parse the block; this code is always used.
    IEB_Parse,
    /// Skip the block entirely; this code is never used.
    IEB_Skip,
    /// Parse the block as a dependent block, which may be used in
    /// some template instantiations but not others.
    IEB_Dependent
  };

  /// Describes the condition of a Microsoft __if_exists or
  /// __if_not_exists block.
  struct IfExistsCondition {
    /// The location of the initial keyword.
    SourceLocation KeywordLoc;
    /// Whether this is an __if_exists block (rather than an
    /// __if_not_exists block).
    bool IsIfExists;
    /// Nested-name-specifier preceding the name.
    CXXScopeSpec SS;
    /// The name we're looking for.
    UnqualifiedId Name;
    /// The behavior this __if_exists or __if_not_exists block
    /// should have.
    IfExistsBehavior Behavior;
  };
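  // For example:
  //   __if_exists(S::Member) { s.Member = 0; }
  // parses the braced block only when 'S::Member' is found (IEB_Parse),
  // skips it entirely when it is not (IEB_Skip), and defers the decision
  // inside templates (IEB_Dependent).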
  bool ParseMicrosoftIfExistsCondition(IfExistsCondition &Result);
  void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
  void ParseMicrosoftIfExistsExternalDeclaration();
  void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                              ParsedAttributes &AccessAttrs,
                                              AccessSpecifier &CurAS);
  bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                              bool &InitExprsOk);
  bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                           SmallVectorImpl<Expr *> &Constraints,
                           SmallVectorImpl<Expr *> &Exprs);

  //===--------------------------------------------------------------------===//
  // C++ 6: Statements and Blocks

  StmtResult ParseCXXTryBlock();
  StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
  StmtResult ParseCXXCatchBlock(bool FnCatch = false);

  //===--------------------------------------------------------------------===//
  // MS: SEH Statements and Blocks

  StmtResult ParseSEHTryBlock();
  StmtResult ParseSEHExceptBlock(SourceLocation Loc);
  StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
  StmtResult ParseSEHLeaveStatement();

  //===--------------------------------------------------------------------===//
  // Objective-C Statements

  StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                  ParsedStmtContext StmtCtx);
  StmtResult ParseObjCTryStmt(SourceLocation atLoc);
  StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
  StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
  StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

  //===--------------------------------------------------------------------===//
  // C99 6.7: Declarations.

  /// A context for parsing declaration specifiers.  TODO: flesh this
  /// out, there are other significant restrictions on specifiers that
  /// would be best implemented in the parser.
  enum class DeclSpecContext {
    DSC_normal,             // normal context
    DSC_class,              // class context, enables 'friend'
    DSC_type_specifier,     // C++ type-specifier-seq or C
                            // specifier-qualifier-list
    DSC_trailing,           // C++11 trailing-type-specifier in a trailing
                            // return type
    DSC_alias_declaration,  // C++11 type-specifier-seq in an alias-declaration
    DSC_top_level,          // top-level/namespace declaration context
    DSC_template_param,     // template parameter context
    DSC_template_type_arg,  // template type argument context
    DSC_objc_method_result, // ObjC method result context, enables
                            // 'instancetype'
    DSC_condition           // condition declaration context
  };

  /// Is this a context in which we are parsing just a type-specifier (or
  /// trailing-type-specifier)?
  static bool isTypeSpecifier(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
      return false;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return true;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }
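  // For instance, the return type in 'auto f() -> const X *' is parsed in
  // DSC_trailing, where only a type-specifier sequence is valid, whereas
  // 'friend class X;' relies on DSC_class enabling 'friend'.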
  /// Whether a defining-type-specifier is permitted in a given context.
  enum class AllowDefiningTypeSpec {
    /// The grammar doesn't allow a defining-type-specifier here, and we must
    /// not parse one (eg, because a '{' could mean something else).
    No,
    /// The grammar doesn't allow a defining-type-specifier here, but we permit
    /// one for error recovery purposes. Sema will reject.
    NoButErrorRecovery,
    /// The grammar allows a defining-type-specifier here, even though it's
    /// always invalid. Sema will reject.
    YesButInvalid,
    /// The grammar allows a defining-type-specifier here, and one can be
    /// valid.
    Yes
  };

  /// Is this a context in which we are parsing defining-type-specifiers (and
  /// so permit class and enum definitions in addition to non-defining class
  /// and enum elaborated-type-specifiers)?
  static AllowDefiningTypeSpec
  isDefiningTypeSpecifierContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
      return AllowDefiningTypeSpec::Yes;

    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
      return AllowDefiningTypeSpec::YesButInvalid;

    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
      return AllowDefiningTypeSpec::NoButErrorRecovery;

    case DeclSpecContext::DSC_trailing:
      return AllowDefiningTypeSpec::No;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which an opaque-enum-declaration can appear?
  static bool isOpaqueEnumDeclarationContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
      return true;

    case DeclSpecContext::DSC_alias_declaration:
    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_type_specifier:
    case DeclSpecContext::DSC_trailing:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }

  /// Is this a context in which we can perform class template argument
  /// deduction?
  static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
    switch (DSC) {
    case DeclSpecContext::DSC_normal:
    case DeclSpecContext::DSC_template_param:
    case DeclSpecContext::DSC_class:
    case DeclSpecContext::DSC_top_level:
    case DeclSpecContext::DSC_condition:
    case DeclSpecContext::DSC_type_specifier:
      return true;

    case DeclSpecContext::DSC_objc_method_result:
    case DeclSpecContext::DSC_template_type_arg:
    case DeclSpecContext::DSC_trailing:
    case DeclSpecContext::DSC_alias_declaration:
      return false;
    }
    llvm_unreachable("Missing DeclSpecContext case");
  }
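  // e.g. 'std::pair p(1, 2.0);' deduces std::pair<int, double>, but only in
  // contexts where the switch above answers true; deduction is not performed
  // for template type arguments or trailing return types.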
  /// Information on a C++0x for-range-initializer found while parsing a
  /// declaration which turns out to be a for-range-declaration.
  struct ForRangeInit {
    SourceLocation ColonLoc;
    ExprResult RangeExpr;

    bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
  };
  struct ForRangeInfo : ForRangeInit {
    StmtResult LoopVar;
  };

  DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                  SourceLocation &DeclEnd,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *DeclSpecStart = nullptr);
  DeclGroupPtrTy
  ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
                         ParsedAttributes &Attrs, bool RequireSemi,
                         ForRangeInit *FRI = nullptr,
                         SourceLocation *DeclSpecStart = nullptr);
  bool MightBeDeclarator(DeclaratorContext Context);
  DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                                SourceLocation *DeclEnd = nullptr,
                                ForRangeInit *FRI = nullptr);
  Decl *ParseDeclarationAfterDeclarator(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
  bool ParseAsmAttributesAfterDeclarator(Declarator &D);
  Decl *ParseDeclarationAfterDeclaratorAndAttributes(
      Declarator &D,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ForRangeInit *FRI = nullptr);
  Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
  Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

  /// When in code-completion, skip parsing of the function/method body
  /// unless the body contains the code-completion point.
  ///
  /// \returns true if the function body was skipped.
  bool trySkippingFunctionBody();

  bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC,
                        ParsedAttributes &Attrs);
  DeclSpecContext
  getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
  void ParseDeclarationSpecifiers(
      DeclSpec &DS,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal,
      LateParsedAttrList *LateAttrs = nullptr);
  bool DiagnoseMissingSemiAfterTagDefinition(
      DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
      LateParsedAttrList *LateAttrs = nullptr);

  void ParseSpecifierQualifierList(
      DeclSpec &DS, AccessSpecifier AS = AS_none,
      DeclSpecContext DSC = DeclSpecContext::DSC_normal);

  void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

  void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                          const ParsedTemplateInfo &TemplateInfo,
                          AccessSpecifier AS, DeclSpecContext DSC);
  void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
  void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                            RecordDecl *TagDecl);
  void ParseStructDeclaration(
      ParsingDeclSpec &DS,
      llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

  bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
  bool isTypeSpecifierQualifier();

  /// isKnownToBeTypeSpecifier - Return true if we know that the specified
  /// token is definitely a type-specifier.  Return false if it isn't part of
  /// a type specifier or if we're not sure.
  bool isKnownToBeTypeSpecifier(const Token &Tok) const;

  /// Return true if we know that we are definitely looking at a
  /// decl-specifier, and isn't part of an expression such as a function-style
  /// cast. Return false if it's not a decl-specifier, or we're not sure.
  bool isKnownToBeDeclarationSpecifier() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationSpecifier() == TPResult::True;
    return isDeclarationSpecifier(true);
  }
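  // The classic ambiguity: as a statement, 'T(a);' declares a variable 'a'
  // of type 'T', while in an expression context 'T(a)' is a function-style
  // cast; [stmt.ambig] resolves such cases in favor of a declaration.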
  /// isDeclarationStatement - Disambiguates between a declaration or an
  /// expression statement, when parsing function bodies.
  /// Returns true for declaration, false for expression.
  bool isDeclarationStatement() {
    if (getLangOpts().CPlusPlus)
      return isCXXDeclarationStatement();
    return isDeclarationSpecifier(true);
  }

  /// isForInitDeclaration - Disambiguates between a declaration or an
  /// expression in the context of the C 'clause-1' or the C++
  /// 'for-init-statement' part of a 'for' statement.
  /// Returns true for declaration, false for expression.
  bool isForInitDeclaration() {
    if (getLangOpts().OpenMP)
      Actions.startOpenMPLoop();
    if (getLangOpts().CPlusPlus)
      return Tok.is(tok::kw_using) ||
             isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
    return isDeclarationSpecifier(true);
  }

  /// Determine whether this is a C++1z for-range-identifier.
  bool isForRangeIdentifier();

  /// Determine whether we are currently at the start of an Objective-C
  /// class message that appears to be missing the open bracket '['.
  bool isStartOfObjCClassMessageMissingOpenBracket();

  /// Starting with a scope specifier, identifier, or
  /// template-id that refers to the current class, determine whether
  /// this is a constructor declarator.
  bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

  /// Specifies the context in which type-id/expression
  /// disambiguation will occur.
  enum TentativeCXXTypeIdContext {
    TypeIdInParens,
    TypeIdUnambiguous,
    TypeIdAsTemplateArgument
  };

  /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
  /// whether the parens contain an expression or a type-id.
  /// Returns true for a type-id and false for an expression.
  bool isTypeIdInParens(bool &isAmbiguous) {
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdInParens, isAmbiguous);
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  bool isTypeIdInParens() {
    bool isAmbiguous;
    return isTypeIdInParens(isAmbiguous);
  }

  /// Checks if the current tokens form a type-id or an expression.
  /// It is similar to isTypeIdInParens but does not assume that the type-id
  /// is in parentheses.
  bool isTypeIdUnambiguously() {
    bool IsAmbiguous;
    if (getLangOpts().CPlusPlus)
      return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
    return isTypeSpecifierQualifier();
  }

  /// isCXXDeclarationStatement - C++-specialized function that disambiguates
  /// between a declaration or an expression statement, when parsing function
  /// bodies. Returns true for declaration, false for expression.
  bool isCXXDeclarationStatement();

  /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
  /// between a simple-declaration or an expression-statement.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  /// Returns false if the statement is disambiguated as an expression.
  bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

  /// isCXXFunctionDeclarator - Disambiguates between a function declarator or
  /// a constructor-style initializer, when parsing declaration statements.
  /// Returns true for a function declarator and false for a constructor-style
  /// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
  /// might be a constructor-style initializer.
  /// If during the disambiguation process a parsing error is encountered,
  /// the function returns true to let the declaration parsing code handle it.
  bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

  struct ConditionDeclarationOrInitStatementState;
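  // e.g. 'T f(X());' declares a function (its parameter has function type),
  // while 'T f((X()));' declares a variable initialized with a
  // value-initialized temporary; the extra parentheses force the expression
  // reading ([dcl.ambig.res]).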
  enum class ConditionOrInitStatement {
    Expression,    ///< Disambiguated as an expression (either kind).
    ConditionDecl, ///< Disambiguated as the declaration form of condition.
    InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
    ForRangeDecl,  ///< Disambiguated as a for-range declaration.
    Error          ///< Can't be any of the above!
  };

  /// Disambiguates between the different kinds of things that can happen
  /// after 'if (' or 'switch ('. This could be one of two different kinds of
  /// declaration (depending on whether there is a ';' later) or an expression.
  ConditionOrInitStatement
  isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                           bool CanBeForRangeDecl);

  bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
  bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
    bool isAmbiguous;
    return isCXXTypeId(Context, isAmbiguous);
  }

  /// TPResult - Used as the result value for functions whose purpose is to
  /// disambiguate C++ constructs by "tentatively parsing" them.
  enum class TPResult { True, False, Ambiguous, Error };

  /// Determine whether we could have an enum-base.
  ///
  /// \p AllowSemi If \c true, then allow a ';' after the enum-base; otherwise
  /// only consider this to be an enum-base if the next token is a '{'.
  ///
  /// \return \c false if this cannot possibly be an enum base; \c true
  /// otherwise.
  bool isEnumBase(bool AllowSemi);

  /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
  /// declaration specifier, TPResult::False if it is not,
  /// TPResult::Ambiguous if it could be either a decl-specifier or a
  /// function-style cast, and TPResult::Error if a parsing error was
  /// encountered. If it could be a braced C++11 function-style cast, returns
  /// BracedCastResult.
  /// Doesn't consume tokens.
  TPResult
  isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                            bool *InvalidAsDeclSpec = nullptr);

  /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
  /// \c TPResult::Ambiguous, determine whether the decl-specifier would be
  /// a type-specifier other than a cv-qualifier.
  bool isCXXDeclarationSpecifierAType();

  /// Determine whether the current token sequence might be
  ///   '<' template-argument-list '>'
  /// rather than a less-than expression.
  TPResult isTemplateArgumentList(unsigned TokensToSkip);

  /// Determine whether an '(' after an 'explicit' keyword is part of a C++20
  /// 'explicit(bool)' declaration, in earlier language modes where that is an
  /// extension.
  TPResult isExplicitBool();

  /// Determine whether an identifier has been tentatively declared as a
  /// non-type. Such tentative declarations should not be found to name a type
  /// during a tentative parse, but also should not be annotated as a non-type.
  bool isTentativelyDeclared(IdentifierInfo *II);

  // "Tentative parsing" functions, used for disambiguation. If a parsing error
  // is encountered they will return TPResult::Error.
  // Returning TPResult::True/False indicates that the ambiguity was
  // resolved and tentative parsing may stop. TPResult::Ambiguous indicates
  // that more tentative parsing is necessary for disambiguation.
  // They all consume tokens, so backtracking should be used after calling
  // them.
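  // A minimal usage sketch, assuming the usual backtracking helper declared
  // elsewhere in this class:
  //   TentativeParsingAction TPA(*this);
  //   TPResult TPR = TryParseDeclarator(/*mayBeAbstract=*/true);
  //   TPA.Revert(); // restore the token stream before committing either way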
  TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
  TPResult TryParseTypeofSpecifier();
  TPResult TryParseProtocolQualifiers();
  TPResult TryParsePtrOperatorSeq();
  TPResult TryParseOperatorId();
  TPResult TryParseInitDeclaratorList();
  TPResult TryParseDeclarator(bool mayBeAbstract,
                              bool mayHaveIdentifier = true,
                              bool mayHaveDirectInit = false);
  TPResult TryParseParameterDeclarationClause(
      bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false);
  TPResult TryParseFunctionDeclarator();
  TPResult TryParseBracketDeclarator();
  TPResult TryConsumeDeclarationSpecifier();

  /// Try to skip a possibly empty sequence of 'attribute-specifier's without
  /// full validation of the syntactic structure of attributes.
  bool TrySkipAttributes();

  /// Diagnoses use of _ExtInt as being deprecated, and diagnoses use of
  /// _BitInt as an extension when appropriate.
  void DiagnoseBitIntUse(const Token &Tok);

public:
  TypeResult
  ParseTypeName(SourceRange *Range = nullptr,
                DeclaratorContext Context = DeclaratorContext::TypeName,
                AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
                ParsedAttributes *Attrs = nullptr);

private:
  void ParseBlockId(SourceLocation CaretLoc);

  /// Are [[]] attributes enabled?
  bool standardAttributesAllowed() const {
    const LangOptions &LO = getLangOpts();
    return LO.DoubleSquareBracketAttributes;
  }

  // Check for the start of an attribute-specifier-seq in a context where an
  // attribute is not allowed.
  bool CheckProhibitedCXX11Attribute() {
    assert(Tok.is(tok::l_square));
    if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
      return false;
    return DiagnoseProhibitedCXX11Attribute();
  }

  bool DiagnoseProhibitedCXX11Attribute();
  void CheckMisplacedCXX11Attribute(ParsedAttributes &Attrs,
                                    SourceLocation CorrectLocation) {
    if (!standardAttributesAllowed())
      return;
    if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
        Tok.isNot(tok::kw_alignas))
      return;
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
  }
  void DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
                                       SourceLocation CorrectLocation);

  void stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs, DeclSpec &DS,
                                      Sema::TagUseKind TUK);

  // FixItLoc = possible correct location for the attributes
  void ProhibitAttributes(ParsedAttributes &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }

  void ProhibitAttributes(ParsedAttributesView &Attrs,
                          SourceLocation FixItLoc = SourceLocation()) {
    if (Attrs.Range.isInvalid())
      return;
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
  void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                    SourceLocation FixItLoc);

  // Forbid C++11 and C2x attributes that appear on certain syntactic
  // locations which the standard permits but we don't support yet, for
  // example, attributes that appertain to decl specifiers.
  void ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
                               bool DiagnoseEmptyAttrs = false);

  /// Skip C++11 and C2x attributes and return the end location of the
  /// last one.
  /// \returns SourceLocation() if there are no attributes.
  SourceLocation SkipCXX11Attributes();

  /// Diagnose and skip C++11 and C2x attributes that appear in syntactic
  /// locations where attributes are not allowed.
  void DiagnoseAndSkipCXX11Attributes();
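  // e.g. attributes prohibited in a given position are diagnosed once via
  // DiagnoseProhibitedAttributes and then dropped from the list, so later
  // parsing never observes them; FixItLoc, when valid, suggests where the
  // attributes could legally move.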
  /// Emit warnings for C++11 and C2x attributes that are in a position that
  /// clang accepts as an extension.
  void DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs);

  /// Parses syntax-generic attribute arguments for attributes which are
  /// known to the implementation, and adds them to the given ParsedAttributes
  /// list with the given attribute syntax. Returns the number of arguments
  /// parsed for the attribute.
  unsigned
  ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
                           SourceLocation *EndLoc, IdentifierInfo *ScopeName,
                           SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);

  enum ParseAttrKindMask {
    PAKM_GNU = 1 << 0,
    PAKM_Declspec = 1 << 1,
    PAKM_CXX11 = 1 << 2,
  };

  /// \brief Parse attributes based on what syntaxes are desired, allowing for
  /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
  /// __attribute__((...)) __declspec(...) __attribute__((...))
  /// Note that Microsoft attributes (spelled with single square brackets) are
  /// not supported by this because of parsing ambiguities with other
  /// constructs.
  ///
  /// There are some attribute parse orderings that should not be allowed in
  /// arbitrary order. e.g.,
  ///
  ///   [[]] __attribute__(()) int i; // OK
  ///   __attribute__(()) [[]] int i; // Not OK
  ///
  /// Such situations should use the specific attribute parsing functionality.
  void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                       LateParsedAttrList *LateAttrs = nullptr);
  /// \brief Possibly parse attributes based on what syntaxes are desired,
  /// allowing for the order to vary.
  bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
                            LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
        (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
      ParseAttributes(WhichAttrKinds, Attrs, LateAttrs);
      return true;
    }
    return false;
  }

  void MaybeParseGNUAttributes(Declarator &D,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParsedAttributes Attrs(AttrFactory);
      ParseGNUAttributes(Attrs, LateAttrs, &D);
      D.takeAttributes(Attrs);
    }
  }

  bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
                               LateParsedAttrList *LateAttrs = nullptr) {
    if (Tok.is(tok::kw___attribute)) {
      ParseGNUAttributes(Attrs, LateAttrs);
      return true;
    }
    return false;
  }

  void ParseGNUAttributes(ParsedAttributes &Attrs,
                          LateParsedAttrList *LateAttrs = nullptr,
                          Declarator *D = nullptr);
  void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                             ParsedAttr::Syntax Syntax, Declarator *D);
  IdentifierLoc *ParseIdentifierLoc();

  unsigned
  ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
                          ParsedAttributes &Attrs, SourceLocation *EndLoc,
                          IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                          ParsedAttr::Syntax Syntax);
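  // e.g. 'int x __attribute__((aligned(16), unused));' — ParseGNUAttributes
  // consumes the double-parenthesized list, dispatching each attribute's
  // arguments to ParseGNUAttributeArgs.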
  void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
    // If parsing the attributes found an OpenMP directive, emit those tokens
    // to the parse stream now.
    if (!OpenMPTokens.empty()) {
      PP.EnterToken(Tok, /*IsReinject*/ true);
      PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
                          /*IsReinject*/ true);
      ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
    }
  }
  void MaybeParseCXX11Attributes(Declarator &D) {
    if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
      ParsedAttributes Attrs(AttrFactory);
      ParseCXX11Attributes(Attrs);
      D.takeAttributes(Attrs);
    }
  }

  bool MaybeParseCXX11Attributes(ParsedAttributes &Attrs,
                                 bool OuterMightBeMessageSend = false) {
    if (standardAttributesAllowed() &&
        isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
      ParseCXX11Attributes(Attrs);
      return true;
    }
    return false;
  }

  void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
                                CachedTokens &OpenMPTokens);

  void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
                                            CachedTokens &OpenMPTokens,
                                            SourceLocation *EndLoc = nullptr);
  void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
                                    SourceLocation *EndLoc = nullptr) {
    CachedTokens OpenMPTokens;
    ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
    ReplayOpenMPAttributeTokens(OpenMPTokens);
  }
  void ParseCXX11Attributes(ParsedAttributes &attrs);
  /// Parses a C++11 (or C2x)-style attribute argument list. Returns true
  /// if this results in adding an attribute to the ParsedAttributes list.
  bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                               SourceLocation AttrNameLoc,
                               ParsedAttributes &Attrs, SourceLocation *EndLoc,
                               IdentifierInfo *ScopeName,
                               SourceLocation ScopeLoc,
                               CachedTokens &OpenMPTokens);

  IdentifierInfo *TryParseCXX11AttributeIdentifier(
      SourceLocation &Loc,
      Sema::AttributeCompletion Completion = Sema::AttributeCompletion::None,
      const IdentifierInfo *EnclosingScope = nullptr);

  void MaybeParseMicrosoftAttributes(ParsedAttributes &Attrs) {
    if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) {
      ParsedAttributes AttrsWithRange(AttrFactory);
      ParseMicrosoftAttributes(AttrsWithRange);
      Attrs.takeAllFrom(AttrsWithRange);
    }
  }
  void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
  void ParseMicrosoftAttributes(ParsedAttributes &Attrs);
  bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
    if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
      ParseMicrosoftDeclSpecs(Attrs);
      return true;
    }
    return false;
  }
  void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs);
  bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs);
  void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
  void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
  SourceLocation SkipExtendedMicrosoftTypeAttributes();
  void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
  void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
  void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
  void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
  void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);

  VersionTuple ParseVersionTuple(SourceRange &Range);
  void ParseAvailabilityAttribute(IdentifierInfo &Availability,
                                  SourceLocation AvailabilityLoc,
                                  ParsedAttributes &attrs,
                                  SourceLocation *endLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  Optional<AvailabilitySpec> ParseAvailabilitySpec();
  ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);

  void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
                                          SourceLocation Loc,
                                          ParsedAttributes &Attrs,
                                          SourceLocation *EndLoc,
                                          IdentifierInfo *ScopeName,
                                          SourceLocation ScopeLoc,
                                          ParsedAttr::Syntax Syntax);
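  // e.g. '__attribute__((availability(macos, introduced=10.12)))' is handled
  // by ParseAvailabilityAttribute, while the '@available(...)' and
  // '__builtin_available(...)' forms go through ParseAvailabilitySpec and
  // ParseAvailabilityCheckExpr.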
  void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
                                       SourceLocation ObjCBridgeRelatedLoc,
                                       ParsedAttributes &Attrs,
                                       SourceLocation *EndLoc,
                                       IdentifierInfo *ScopeName,
                                       SourceLocation ScopeLoc,
                                       ParsedAttr::Syntax Syntax);

  void ParseSwiftNewTypeAttribute(IdentifierInfo &AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  ParsedAttr::Syntax Syntax);

  void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
                                        SourceLocation AttrNameLoc,
                                        ParsedAttributes &Attrs,
                                        SourceLocation *EndLoc,
                                        IdentifierInfo *ScopeName,
                                        SourceLocation ScopeLoc,
                                        ParsedAttr::Syntax Syntax);

  void ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 ParsedAttr::Syntax Syntax);

  void ParseTypeofSpecifier(DeclSpec &DS);
  SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
  void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                         SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
  void ParseAtomicSpecifier(DeclSpec &DS);

  ExprResult ParseAlignArgument(SourceLocation Start,
                                SourceLocation &EllipsisLoc);
  void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                               SourceLocation *endLoc = nullptr);
  ExprResult ParseExtIntegerArgument();

  void ParsePtrauthQualifier(ParsedAttributes &Attrs);

  VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
  VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
    return isCXX11VirtSpecifier(Tok);
  }
  void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                          SourceLocation FriendLoc);

  bool isCXX11FinalKeyword() const;
  bool isClassCompatibleKeyword() const;

  /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
  /// enter a new C++ declarator scope and exit it when the function is
  /// finished.
  class DeclaratorScopeObj {
    Parser &P;
    CXXScopeSpec &SS;
    bool EnteredScope;
    bool CreatedScope;

  public:
    DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
        : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

    void EnterDeclaratorScope() {
      assert(!EnteredScope && "Already entered the scope!");
      assert(SS.isSet() && "C++ scope was not set!");

      CreatedScope = true;
      P.EnterScope(0); // Not a decl scope.

      if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
        EnteredScope = true;
    }

    ~DeclaratorScopeObj() {
      if (EnteredScope) {
        assert(SS.isSet() && "C++ scope was cleared ?");
        P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
      }
      if (CreatedScope)
        P.ExitScope();
    }
  };

  /// ParseDeclarator - Parse and verify a newly-initialized declarator.
  void ParseDeclarator(Declarator &D);
  /// A function that parses a variant of direct-declarator.
  typedef void (Parser::*DirectDeclParseFunction)(Declarator &);
  void ParseDeclaratorInternal(Declarator &D,
                               DirectDeclParseFunction DirectDeclParser);

  enum AttrRequirements {
    AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
    AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
    AR_GNUAttributesParsed = 1 << 1,
    AR_CXX11AttributesParsed = 1 << 2,
    AR_DeclspecAttributesParsed = 1 << 3,
    AR_AllAttributesParsed = AR_GNUAttributesParsed |
                             AR_CXX11AttributesParsed |
                             AR_DeclspecAttributesParsed,
    AR_VendorAttributesParsed =
        AR_GNUAttributesParsed | AR_DeclspecAttributesParsed
  };

  void ParseTypeQualifierListOpt(
      DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
      bool AtomicAllowed = true, bool IdentifierRequired = false,
      Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
  void ParseDirectDeclarator(Declarator &D);
  void ParseDecompositionDeclarator(Declarator &D);
  void ParseParenDeclarator(Declarator &D);
  void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &FirstArgAttrs,
                               BalancedDelimiterTracker &Tracker,
                               bool IsAmbiguous, bool RequiresArg = false);
  void InitCXXThisScopeForDeclaratorIfRelevant(
      const Declarator &D, const DeclSpec &DS,
      llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
  bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
                         SourceLocation &RefQualifierLoc);
  bool isFunctionDeclaratorIdentifierList();
  void ParseFunctionDeclaratorIdentifierList(
      Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
  void ParseParameterDeclarationClause(
      DeclaratorContext DeclaratorContext, ParsedAttributes &attrs,
      SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
      SourceLocation &EllipsisLoc);

  void ParseBracketDeclarator(Declarator &D);
  void ParseMisplacedBracketDeclarator(Declarator &D);

  //===--------------------------------------------------------------------===//
  // C++ 7: Declarations [dcl.dcl]

  /// The kind of attribute specifier we have found.
  enum CXX11AttributeKind {
    /// This is not an attribute specifier.
    CAK_NotAttributeSpecifier,
    /// This should be treated as an attribute-specifier.
    CAK_AttributeSpecifier,
    /// The next tokens are '[[', but this is not an attribute-specifier. This
    /// is ill-formed by C++11 [dcl.attr.grammar]p6.
    CAK_InvalidAttributeSpecifier
  };
  CXX11AttributeKind
  isCXX11AttributeSpecifier(bool Disambiguate = false,
                            bool OuterMightBeMessageSend = false);

  void DiagnoseUnexpectedNamespace(NamedDecl *Context);

  DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                SourceLocation InlineLoc = SourceLocation());

  struct InnerNamespaceInfo {
    SourceLocation NamespaceLoc;
    SourceLocation InlineLoc;
    SourceLocation IdentLoc;
    IdentifierInfo *Ident;
  };
  using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;

  void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
                           unsigned int index, SourceLocation &InlineLoc,
                           ParsedAttributes &attrs,
                           BalancedDelimiterTracker &Tracker);
  Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
  Decl *ParseExportDeclaration();
  DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
      DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
      SourceLocation &DeclEnd, ParsedAttributes &Attrs);
  Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc,
                            SourceLocation &DeclEnd, ParsedAttributes &attrs);

  struct UsingDeclarator {
    SourceLocation TypenameLoc;
    CXXScopeSpec SS;
    UnqualifiedId Name;
    SourceLocation EllipsisLoc;

    void clear() {
      TypenameLoc = EllipsisLoc = SourceLocation();
      SS.clear();
      Name.clear();
    }
  };

  bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
  DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
                                       const ParsedTemplateInfo &TemplateInfo,
                                       SourceLocation UsingLoc,
                                       SourceLocation &DeclEnd,
                                       ParsedAttributes &Attrs,
                                       AccessSpecifier AS = AS_none);
  Decl *ParseAliasDeclarationAfterDeclarator(
      const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
      UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
      ParsedAttributes &Attrs, Decl **OwnedType = nullptr);

  Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
  Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
                            SourceLocation AliasLoc, IdentifierInfo *Alias,
                            SourceLocation &DeclEnd);
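  // e.g. 'using A::x, B::y;' (C++17) parses two using-declarators in one
  // using-declaration, while 'using T = int;' is routed to
  // alias-declaration parsing once the '=' is seen after the declarator.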
  //===--------------------------------------------------------------------===//
  // C++ 9: classes [class] and C structs/unions.

  bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
  void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
                           DeclSpec &DS,
                           const ParsedTemplateInfo &TemplateInfo,
                           AccessSpecifier AS, bool EnteringContext,
                           DeclSpecContext DSC, ParsedAttributes &Attributes);
  void SkipCXXMemberSpecification(SourceLocation StartLoc,
                                  SourceLocation AttrFixitLoc,
                                  unsigned TagType, Decl *TagDecl);
  void ParseCXXMemberSpecification(SourceLocation StartLoc,
                                   SourceLocation AttrFixitLoc,
                                   ParsedAttributes &Attrs, unsigned TagType,
                                   Decl *TagDecl);
  ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
                                       SourceLocation &EqualLoc);
  bool
  ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
                                            VirtSpecifiers &VS,
                                            ExprResult &BitfieldSize,
                                            LateParsedAttrList &LateAttrs);
  void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(
      Declarator &D, VirtSpecifiers &VS);
  DeclGroupPtrTy ParseCXXClassMemberDeclaration(
      AccessSpecifier AS, ParsedAttributes &Attr,
      const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
      ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
  DeclGroupPtrTy
  ParseCXXClassMemberDeclarationWithPragmas(AccessSpecifier &AS,
                                            ParsedAttributes &AccessAttrs,
                                            DeclSpec::TST TagType, Decl *Tag);
  void ParseConstructorInitializer(Decl *ConstructorDecl);
  MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
  void HandleMemberFunctionDeclDelays(Declarator &DeclaratorInfo,
                                      Decl *ThisDecl);

  //===--------------------------------------------------------------------===//
  // C++ 10: Derived classes [class.derived]
  TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
                                    SourceLocation &EndLocation);
  void ParseBaseClause(Decl *ClassDecl);
  BaseResult ParseBaseSpecifier(Decl *ClassDecl);
  AccessSpecifier getAccessSpecifierIfPresent() const;

  bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, ParsedType ObjectType,
                                    bool ObjectHadErrors,
                                    SourceLocation TemplateKWLoc,
                                    IdentifierInfo *Name,
                                    SourceLocation NameLoc,
                                    bool EnteringContext, UnqualifiedId &Id,
                                    bool AssumeTemplateId);
  bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
                                  ParsedType ObjectType,
                                  UnqualifiedId &Result);

  //===--------------------------------------------------------------------===//
  // OpenMP: Directives and clauses.

  /// Parse clauses for '#pragma omp declare simd'.
  DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
                                            CachedTokens &Toks,
                                            SourceLocation Loc);

  /// Parse a property kind into \p TIProperty for the selector set \p Set and
  /// selector \p Selector.
  void parseOMPTraitPropertyKind(OMPTraitProperty &TIProperty,
                                 llvm::omp::TraitSet Set,
                                 llvm::omp::TraitSelector Selector,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector kind into \p TISelector for the selector set \p Set.
  void parseOMPTraitSelectorKind(OMPTraitSelector &TISelector,
                                 llvm::omp::TraitSet Set,
                                 llvm::StringMap<SourceLocation> &Seen);

  /// Parse a selector set kind into \p TISet.
  void parseOMPTraitSetKind(OMPTraitSet &TISet,
                            llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context property.
  void parseOMPContextProperty(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &Seen);

  /// Parses an OpenMP context selector.
  void parseOMPContextSelector(OMPTraitSelector &TISelector,
                               llvm::omp::TraitSet Set,
                               llvm::StringMap<SourceLocation> &SeenSelectors);

  /// Parses an OpenMP context selector set.
  void parseOMPContextSelectorSet(OMPTraitSet &TISet,
                                  llvm::StringMap<SourceLocation> &SeenSets);
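  // e.g. in
  //   #pragma omp declare variant(foo) match(device = {arch(x86_64)})
  // the context selector set is 'device', the selector 'arch', and the
  // property 'x86_64'.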
  /// Parses OpenMP context selectors.
  bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);

  /// Parse an 'append_args' clause for '#pragma omp declare variant'.
  bool parseOpenMPAppendArgs(
      SmallVectorImpl<OMPDeclareVariantAttr::InteropType> &InterOpTypes);

  /// Parse a `match` clause for an '#pragma omp declare variant'. Return true
  /// if there was an error.
  bool parseOMPDeclareVariantMatchClause(SourceLocation Loc, OMPTraitInfo &TI,
                                         OMPTraitInfo *ParentTI);

  /// Parse clauses for '#pragma omp declare variant'.
  void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
                                     SourceLocation Loc);

  /// Parse 'omp [begin] assume[s]' directive.
  void ParseOpenMPAssumesDirective(OpenMPDirectiveKind DKind,
                                   SourceLocation Loc);

  /// Parse 'omp end assumes' directive.
  void ParseOpenMPEndAssumesDirective(SourceLocation Loc);

  /// Parse clauses for '#pragma omp [begin] declare target'.
  void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);

  /// Parse '#pragma omp end declare target'.
  void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
                                         OpenMPDirectiveKind EndDKind,
                                         SourceLocation Loc);

  /// Skip tokens until an `annot_pragma_openmp_end` is found. Emit a warning
  /// if it is not the current token.
  void skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind);

  /// Check the \p FoundKind against the \p ExpectedKind; if they do not
  /// match, issue an error that the "end" matching the "begin" directive of
  /// kind \p BeginKind was not found. Finally, if the expected kind was found
  /// or if \p SkipUntilOpenMPEnd is set, skip ahead using the helper
  /// `skipUntilPragmaOpenMPEnd`.
  void parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
                            OpenMPDirectiveKind ExpectedKind,
                            OpenMPDirectiveKind FoundKind,
                            SourceLocation MatchingLoc,
                            SourceLocation FoundLoc, bool SkipUntilOpenMPEnd);

  /// Parses declarative OpenMP directives.
  DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
      AccessSpecifier &AS, ParsedAttributes &Attrs, bool Delayed = false,
      DeclSpec::TST TagType = DeclSpec::TST_unspecified,
      Decl *TagDecl = nullptr);
  /// Parse 'omp declare reduction' construct.
  DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
  /// Parses initializer for provided omp_priv declaration inside the
  /// reduction initializer.
  void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);

  /// Parses 'omp declare mapper' directive.
  DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
  /// Parses variable declaration in 'omp declare mapper' directive.
  TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
                                             DeclarationName &Name,
                                             AccessSpecifier AS = AS_none);

  /// Tries to parse cast part of OpenMP array shaping operation:
  /// '[' expression ']' { '[' expression ']' } ')'.
  bool tryParseOpenMPArrayShapingCastPart();

  /// Parses simple list of variables.
  ///
  /// \param Kind Kind of the directive.
  /// \param Callback Callback function to be called for the list elements.
  /// \param AllowScopeSpecifier true, if the variables can have fully
  /// qualified names.
  ///
  bool ParseOpenMPSimpleVarList(
      OpenMPDirectiveKind Kind,
      const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
          Callback,
      bool AllowScopeSpecifier);
  /// Parses declarative or executable directive.
  ///
  /// \param StmtCtx The context in which we're parsing the directive.
  /// \param ReadDirectiveWithinMetadirective true if directive is within a
  /// metadirective and therefore ends on the closing paren.
  StmtResult ParseOpenMPDeclarativeOrExecutableDirective(
      ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective = false);
  /// Parses clause of kind \a CKind for directive of a kind \a Kind.
  ///
  /// \param DKind Kind of current directive.
  /// \param CKind Kind of current clause.
  /// \param FirstClause true, if this is the first clause of a kind \a CKind
  /// in current directive.
  ///
  OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
                               OpenMPClauseKind CKind, bool FirstClause);
  /// Parses clause with a single expression of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         bool ParseOnly);
  /// Parses simple clause of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
  /// Parses an indirect clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// false.
  bool ParseOpenMPIndirectClause(Sema::DeclareTargetContextInfo &DTCI,
                                 bool ParseOnly);
  /// Parses clause with a single expression and an additional argument
  /// of a kind \a Kind.
  ///
  /// \param DKind Directive kind.
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
                                                OpenMPClauseKind Kind,
                                                bool ParseOnly);

  /// Parses the 'sizes' clause of a '#pragma omp tile' directive.
  OMPClause *ParseOpenMPSizesClause();

  /// Parses clause without any additional arguments.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
  /// Parses clause with the list of variables of a kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
                                      OpenMPClauseKind Kind, bool ParseOnly);

  /// Parses and creates OpenMP 5.0 iterators expression:
  /// <iterators> = 'iterator' '(' { [ <iterator-type> ] identifier =
  /// <range-specification> }+ ')'
  ExprResult ParseOpenMPIteratorsExpr();

  /// Parses allocators and traits in the context of the uses_allocator
  /// clause. Expected format:
  /// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
  OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);

  /// Parses clause with an interop variable of kind \a Kind.
  ///
  /// \param Kind Kind of current clause.
  /// \param ParseOnly true to skip the clause's semantic actions and return
  /// nullptr.
  ///
  OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);

public:
  /// Parses simple expression in parens for single-expression clauses of
  /// OpenMP constructs.
  /// \param RLoc Returned location of right paren.
  ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
                                   bool IsAddressOfOperand = false);
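  // e.g. 'depend(iterator(i = 0:n), in : a[i])' combines an iterators
  // expression with the variable list that follows.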
  /// Data used for parsing list of variables in OpenMP clauses.
  struct OpenMPVarListDataTy {
    Expr *DepModOrTailExpr = nullptr;
    SourceLocation ColonLoc;
    SourceLocation RLoc;
    CXXScopeSpec ReductionOrMapperIdScopeSpec;
    DeclarationNameInfo ReductionOrMapperId;
    int ExtraModifier = -1; ///< Additional modifier for linear, map, depend
                            ///< or lastprivate clause.
    SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
        MapTypeModifiers;
    SmallVector<SourceLocation, NumberOfOMPMapClauseModifiers>
        MapTypeModifiersLoc;
    SmallVector<OpenMPMotionModifierKind, NumberOfOMPMotionModifiers>
        MotionModifiers;
    SmallVector<SourceLocation, NumberOfOMPMotionModifiers> MotionModifiersLoc;
    bool IsMapTypeImplicit = false;
    SourceLocation ExtraModifierLoc;
  };

  /// Parses clauses with list.
  bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
                          SmallVectorImpl<Expr *> &Vars,
                          OpenMPVarListDataTy &Data);
  bool ParseUnqualifiedId(CXXScopeSpec &SS, ParsedType ObjectType,
                          bool ObjectHadErrors, bool EnteringContext,
                          bool AllowDestructorName, bool AllowConstructorName,
                          bool AllowDeductionGuide,
                          SourceLocation *TemplateKWLoc,
                          UnqualifiedId &Result);

  /// Parses the mapper modifier in map, to, and from clauses.
  bool parseMapperModifier(OpenMPVarListDataTy &Data);
  /// Parses map-type-modifiers in map clause.
  /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
  /// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
  bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);

private:
  //===--------------------------------------------------------------------===//
  // C++ 14: Templates [temp]

  // C++ 14.1: Template Parameters [temp.param]
  Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
                                             SourceLocation &DeclEnd,
                                             ParsedAttributes &AccessAttrs,
                                             AccessSpecifier AS = AS_none);
  Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
                                                 SourceLocation &DeclEnd,
                                                 ParsedAttributes &AccessAttrs,
                                                 AccessSpecifier AS);
  Decl *ParseSingleDeclarationAfterTemplate(
      DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
      ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
      ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
  bool ParseTemplateParameters(MultiParseScope &TemplateScopes, unsigned Depth,
                               SmallVectorImpl<NamedDecl *> &TemplateParams,
                               SourceLocation &LAngleLoc,
                               SourceLocation &RAngleLoc);
  bool ParseTemplateParameterList(
      unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams);
  TPResult isStartOfTemplateTypeParameter();
  NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
  NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
  bool isTypeConstraintAnnotation();
  bool TryAnnotateTypeConstraint();
  void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
                                 SourceLocation CorrectLoc,
                                 bool AlreadyHasEllipsis,
                                 bool IdentifierHasName);
  void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
                                             Declarator &D);
  // C++ 14.3: Template arguments [temp.arg]
  typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;

  bool ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
                                      SourceLocation &RAngleLoc,
                                      bool ConsumeLastToken,
                                      bool ObjCGenericList);
  bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
                                        SourceLocation &LAngleLoc,
                                        TemplateArgList &TemplateArgs,
                                        SourceLocation &RAngleLoc,
                                        TemplateTy NameHint = nullptr);
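  // e.g. closing 'A<B<int>>' requires splitting the '>>' token into two '>'s
  // in C++11 mode (and as an extension elsewhere), which
  // ParseGreaterThanInTemplateList handles.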
TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true, bool TypeConstraint = false); void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS, bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs, TemplateTy Template, SourceLocation OpenLoc); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(Sema::ModuleImportState &ImportState); Decl *ParseModuleImport(SourceLocation AtLoc, Sema::ModuleImportState &ImportState); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. 
TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; class GNUAsmQualifiers { unsigned Qualifiers = AQ_unspecified; public: enum AQ { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4, }; static const char *getQualifierName(AQ Qualifier); bool setAsmQualifier(AQ Qualifier); inline bool isVolatile() const { return Qualifiers & AQ_volatile; } inline bool isInline() const { return Qualifiers & AQ_inline; } inline bool isGoto() const { return Qualifiers & AQ_goto; } }; bool isGCCAsmStatement(const Token &TokAfterAsm) const; bool isGNUAsmQualifier(const Token &TokAfterAsm) const; GNUAsmQualifiers::AQ getGNUAsmQualifier(const Token &Tok) const; bool parseGNUAsmQualifierListOpt(GNUAsmQualifiers &AQ); }; } // end namespace clang #endif
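/* The GNUAsmQualifiers class above keeps the parsed `asm` qualifiers in one
 * bitmask (AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4), so the isVolatile()/
 * isInline()/isGoto() accessors are plain bit tests. The body of
 * setAsmQualifier() lives elsewhere and is not shown; the standalone C sketch
 * below assumes it ORs the bit in and reports a duplicate qualifier as an
 * error -- a minimal illustration of the bitmask scheme, not Clang's code. */
#include <stdio.h>

enum aq { AQ_unspecified = 0, AQ_volatile = 1, AQ_inline = 2, AQ_goto = 4 };

/* returns 1 on a duplicate qualifier (a parse error), 0 otherwise */
static int set_asm_qualifier(unsigned *quals, enum aq q)
{
	if (*quals & q)
		return 1;	/* e.g. "asm volatile volatile (...)" */
	*quals |= q;
	return 0;
}

int main(void)
{
	unsigned quals = AQ_unspecified;
	set_asm_qualifier(&quals, AQ_volatile);
	set_asm_qualifier(&quals, AQ_goto);
	printf("volatile=%d inline=%d goto=%d\n",
	       !!(quals & AQ_volatile), !!(quals & AQ_inline), !!(quals & AQ_goto));
	/* a second "volatile" is rejected as a duplicate */
	printf("duplicate=%d\n", set_asm_qualifier(&quals, AQ_volatile));
	return 0;
}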
private.c
/* Minimal OpenMP data-sharing demo: x is declared private, so each thread
 * gets its own uninitialized copy inside the parallel region (the value
 * printed for x is indeterminate), while y stays shared and prints 1. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

float x;
int y;

int main (int argc, char * argv[])
{
#ifdef _OPENMP
  omp_set_num_threads(4);
#endif
  x = 1.0;
  y = 1;
#pragma omp parallel private(x)
  {
    /* x here is the thread-private copy, not the global that was set to 1.0 */
    printf("x=%f, y=%d\n", x, y);
  }
  return 0;
}
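/* Companion sketch to private.c: with firstprivate each thread's private
 * copy of x is instead initialized from the value the variable had on entry
 * to the region, so every thread prints x=1.000000 rather than an
 * indeterminate value. Same four-thread setup as above, for contrast. */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main (void)
{
  float x = 1.0f;
#ifdef _OPENMP
  omp_set_num_threads(4);
#endif
#pragma omp parallel firstprivate(x)
  {
    printf("x=%f\n", x);   /* always 1.000000: copy-initialized per thread */
  }
  return 0;
}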
gather_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jxyang@openailab.com * Update: hhchen@openailab.com */ #include <math.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "gather_param.h" typedef struct { int* in_shape; // the dim of the input int axis; int indices_num; int dim_size; int is_onnx; } gather_param_t; static int ref_gather_fp32(float* input, int* input_indices, float* output, gather_param_t* param, int num_thread) { float* out_ptr = output; float* in_ptr = input; int axis = param->axis; int outer_size = 1; int inner_size = 1; int axis_size = param->in_shape[axis]; for (int i = 0; i < axis; i++) { outer_size *= param->in_shape[i]; } for (int i = axis + 1; i < param->dim_size; i++) { inner_size *= param->in_shape[i]; // printf("inner_size size: %d %d \n", inner_size, param->in_shape[i]); } // #pragma omp parallel for num_threads(num_thread) if(param->is_onnx){ for (int outer = 0; outer < outer_size; ++outer) { memcpy(out_ptr + (outer * param->indices_num ) * inner_size, in_ptr + (outer* axis_size + param->indices_num) * inner_size, inner_size* sizeof(float)); } } else { for (int outer = 0; outer < outer_size; ++outer) { for (int i = 0; i < param->indices_num; i++) { memcpy(out_ptr + (outer * param->indices_num + i) * inner_size, in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size * sizeof(float)); } } } return 0; } static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread) { uint8_t* out_ptr = output; uint8_t* in_ptr = input; int axis = param->axis; int outer_size = 1; int inner_size = 1; int axis_size = param->in_shape[axis]; for (int i = 0; i < axis; i++) { outer_size *= param->in_shape[i]; } for (int i = axis + 1; i < param->dim_size; i++) { inner_size *= param->in_shape[i]; } // #pragma omp parallel for num_threads(num_thread) for (int outer = 0; outer < outer_size; ++outer) { for (int i = 0; i < param->indices_num; i++) { memcpy(out_ptr + (outer * param->indices_num + i) * inner_size, in_ptr + (outer * axis_size + ( int )input_indices[i]) * inner_size, inner_size); } } return 0; } static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem; gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv; struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); op_priv_info->axis = gather_param->axis; op_priv_info->indices_num = 
gather_param->indices_num; op_priv_info->is_onnx = gather_param->is_onnx; op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num*sizeof(int)); /* prerun now */ return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct ir_tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv; int out_size = input_tensor->elem_num; // auto in_dim = input_tensor->GetShape().GetDim(); void* input = input_tensor->data; void* indices_data = indices_tensor->data; op_priv_info->dim_size = input_tensor->dim_num; for (int i = 0; i < op_priv_info->dim_size; i++) { op_priv_info->in_shape[i] = input_tensor->dims[i]; } // printf("in shape: %d %d %d %d\n", op_priv_info->in_shape[0], op_priv_info->in_shape[1], op_priv_info->in_shape[2], op_priv_info->in_shape[3]); // int indices_num = op_param.indices_num; void* output = output_tensor->data; int ret = -1; if (input_tensor->data_type == TENGINE_DT_FP32) ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread); else if(input_tensor->data_type == TENGINE_DT_UINT8) ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread); return ret; } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t)); if (op_priv_info == NULL) { set_tengine_errno(ENOMEM); return -1; } memset(op_priv_info, 0, sizeof(gather_param_t)); exec_node->ops_priv = op_priv_info; return 0; } static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; gather_param_t* op_param = ( gather_param_t* )exec_node->ops_priv; sys_free(op_param->in_shape); return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv; sys_free(op_priv_info); exec_node->ops_priv = NULL; return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } /* postrun must be registered so the in_shape buffer allocated in prerun is freed */ static struct node_ops gather_node_ops = {.prerun = prerun, .run = run, .reshape = NULL, .postrun = postrun, .init_node = init_node, .release_node = release_node, .score = score}; static int reg_gather_ops(void* arg) { return register_builtin_node_ops(OP_GATHER, &gather_node_ops); } static int unreg_gather_ops(void* arg) { return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops); } AUTO_REGISTER_OPS(reg_gather_ops); AUTO_UNREGISTER_OPS(unreg_gather_ops);
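/* Standalone sketch of the decomposition ref_gather_fp32 uses: a gather
 * along `axis` views the input as [outer_size][axis_size][inner_size] and
 * copies one inner_size-long row per (outer, index) pair. The shapes below
 * are made up for illustration: a 2x4x3 input gathered with indices {3,0}
 * on axis 1 gives a 2x2x3 output. No Tengine headers needed. */
#include <stdio.h>
#include <string.h>

static void gather_axis(const float *in, const int *idx, float *out,
                        int outer_size, int axis_size, int inner_size,
                        int indices_num)
{
	for (int outer = 0; outer < outer_size; ++outer)
		for (int i = 0; i < indices_num; ++i)
			memcpy(out + (outer * indices_num + i) * inner_size,
			       in  + (outer * axis_size  + idx[i]) * inner_size,
			       inner_size * sizeof(float));
}

int main(void)
{
	float in[2 * 4 * 3];
	for (int i = 0; i < 24; i++) in[i] = (float) i;
	int idx[2] = { 3, 0 };
	float out[2 * 2 * 3];
	gather_axis(in, idx, out, 2 /*outer*/, 4 /*axis*/, 3 /*inner*/, 2);
	for (int i = 0; i < 12; i++)
		printf("%g ", out[i]);   /* 9 10 11 0 1 2 21 22 23 12 13 14 */
	printf("\n");
	return 0;
}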
rawSHA512_ng_fmt_plug.c
/* * Copyright (c) 2013, epixoip. * Copyright (c) 2015, magnum (pseudo-intrinsics also supporting AVX2/AVX512) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistribution of source * retains the above copyright. */ #include "arch.h" #if SIMD_COEF_32 && ARCH_LITTLE_ENDIAN==1 #if FMT_EXTERNS_H extern struct fmt_main fmt_rawSHA512_ng; #elif FMT_REGISTERS_H john_register_one(&fmt_rawSHA512_ng); #else #if _OPENMP #include <omp.h> #if __XOP__ #ifndef OMP_SCALE #define OMP_SCALE 768 /* AMD */ #endif #else #ifndef OMP_SCALE #define OMP_SCALE 2048 /* Intel */ #endif #endif #endif #include "misc.h" #if !defined(DEBUG) && !defined(WITH_ASAN) // These compilers claim to be __GNUC__ but warn on gcc pragmas. #if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER #pragma GCC optimize 3 #endif #endif #include <stdint.h> #include <string.h> #include "pseudo_intrinsics.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "rawSHA512_common.h" #include "memdbg.h" #if __MIC__ #define SIMD_TYPE "512/512 MIC 8x" #elif __AVX512F__ #define SIMD_TYPE "512/512 AVX512 8x" #elif __AVX2__ #define SIMD_TYPE "256/256 AVX2 4x" #elif __ALTIVEC__ #define SIMD_TYPE "128/128 AltiVec 2x" #elif __ARM_NEON #define SIMD_TYPE "128/128 NEON 2x" #elif __XOP__ #define SIMD_TYPE "128/128 XOP 2x" #elif __SSSE3__ #define SIMD_TYPE "128/128 SSSE3 2x" #else #define SIMD_TYPE "128/128 SSE2 2x" #endif #define FORMAT_LABEL "Raw-SHA512-ng" #define FORMAT_NAME "" #define ALGORITHM_NAME "SHA512 " SIMD_TYPE #define VWIDTH SIMD_COEF_64 // max length is not 119, but 8 less than this, or 111. 111 actually makes sense. // For SHA512 there are 14 'usable' 8 byte ints, minus 1 byte (for the 0x80). // 14*8-1 is 111. This comment is left for reference for future sha2 hackers within JtR.
//#define MAXLEN 119 #define MAXLEN 111 #define PLAINTEXT_LENGTH MAXLEN #define CIPHERTEXT_LENGTH 128 #define BINARY_SIZE 8 #define SALT_SIZE 0 #define SALT_ALIGN 1 #define MIN_KEYS_PER_CRYPT VWIDTH #define MAX_KEYS_PER_CRYPT VWIDTH #undef GATHER /* This one is not like the shared ones in pseudo_intrinsics.h */ #if __AVX512F__ || __MIC__ #define GATHER(x,y,z) \ { \ x = vset_epi64(y[index + 7][z], y[index + 6][z], \ y[index + 5][z], y[index + 4][z], \ y[index + 3][z], y[index + 2][z], \ y[index + 1][z], y[index + 0][z]); \ } #elif __AVX2__ #define GATHER(x,y,z) \ { \ x = vset_epi64(y[index + 3][z], y[index + 2][z], \ y[index + 1][z], y[index ][z]); \ } #else #define GATHER(x,y,z) \ { \ x = vset_epi64(y[index + 1][z], y[index ][z]); \ } #endif #define S0(x) \ ( \ vxor( \ vroti_epi64(x, -39), \ vxor( \ vroti_epi64(x, -28), \ vroti_epi64(x, -34) \ ) \ ) \ ) #define S1(x) \ ( \ vxor( \ vroti_epi64(x, -41), \ vxor( \ vroti_epi64(x, -14), \ vroti_epi64(x, -18) \ ) \ ) \ ) #define s0(x) \ ( \ vxor( \ vsrli_epi64(x, 7), \ vxor( \ vroti_epi64(x, -1), \ vroti_epi64(x, -8) \ ) \ ) \ ) #define s1(x) \ ( \ vxor( \ vsrli_epi64(x, 6), \ vxor( \ vroti_epi64(x, -19), \ vroti_epi64(x, -61) \ ) \ ) \ ) #if !VCMOV_EMULATED #define Maj(x,y,z) vcmov(x, y, vxor(z, y)) #else #define Maj(x,y,z) vor(vand(x, y), vand(vor(x, y), z)) #endif #define Ch(x,y,z) vcmov(y, z, x) #define R(t) \ { \ tmp1 = vadd_epi64(s1(w[t - 2]), w[t - 7]); \ tmp2 = vadd_epi64(s0(w[t - 15]), w[t - 16]); \ w[t] = vadd_epi64(tmp1, tmp2); \ } #define SHA512_STEP(a,b,c,d,e,f,g,h,x,K) \ { \ tmp1 = vadd_epi64(h, w[x]); \ tmp2 = vadd_epi64(S1(e),vset1_epi64(K)); \ tmp1 = vadd_epi64(tmp1, Ch(e,f,g)); \ tmp1 = vadd_epi64(tmp1, tmp2); \ tmp2 = vadd_epi64(S0(a),Maj(a,b,c)); \ d = vadd_epi64(tmp1, d); \ h = vadd_epi64(tmp1, tmp2); \ } static uint64_t (*saved_key)[16]; static uint64_t *crypt_key[ 8]; static void init(struct fmt_main *self) { int i; #ifdef _OPENMP int omp_t; omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); for (i = 0; i < 8; i++) crypt_key[i] = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(uint64_t), MEM_ALIGN_SIMD); } static void done(void) { int i; for (i = 0; i < 8; i++) MEM_FREE(crypt_key[i]); MEM_FREE(saved_key); } static void alter_endianity_64(uint64_t *x, unsigned int size) { int i; for (i=0; i < (size / sizeof(*x)); i++) x[i] = JOHNSWAP64(x[i]); } static void *get_binary(char *ciphertext) { static union { unsigned char c[DIGEST_SIZE]; uint64_t w[DIGEST_SIZE / sizeof(uint64_t)]; } *out; int i; if (!out) out = mem_alloc_tiny(DIGEST_SIZE, BINARY_ALIGN); ciphertext += TAG_LENGTH; for (i=0; i < DIGEST_SIZE; i++) out->c[i] = atoi16[ARCH_INDEX(ciphertext[i*2])] * 16 + atoi16[ARCH_INDEX(ciphertext[i*2 + 1])]; alter_endianity_64(out->w, DIGEST_SIZE); out->w[0] -= 0x6a09e667f3bcc908ULL; out->w[1] -= 0xbb67ae8584caa73bULL; out->w[2] -= 0x3c6ef372fe94f82bULL; out->w[3] -= 0xa54ff53a5f1d36f1ULL; out->w[4] -= 0x510e527fade682d1ULL; out->w[5] -= 0x9b05688c2b3e6c1fULL; out->w[6] -= 0x1f83d9abfb41bd6bULL; out->w[7] -= 0x5be0cd19137e2179ULL; return (void *) out; } static int get_hash_0(int index) { return crypt_key[0][index] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_key[0][index] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_key[0][index] & PH_MASK_2; } static int get_hash_3(int index) { return 
crypt_key[0][index] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_key[0][index] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_key[0][index] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_key[0][index] & PH_MASK_6; } static void set_key(char *key, int index) { uint64_t *buf64 = (uint64_t *) &saved_key[index]; uint8_t *buf8 = (uint8_t * ) buf64; int len = 0; while (*key && len < MAXLEN) buf8[len++] = *key++; buf64[15] = len << 3; buf8[len++] = 0x80; while (buf8[len] && len <= MAXLEN) buf8[len++] = 0; } static char *get_key(int index) { uint64_t *buf64 = (uint64_t *) &saved_key[index]; uint8_t *buf8 = (uint8_t * ) buf64; static char out[MAXLEN + 1]; int len = (int)(buf64[15] >> 3); out[len] = 0; for (len--; len > -1; len--) out[len] = buf8[len]; return (char *) out; } static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index += VWIDTH) #endif { int i; vtype a, b, c, d, e, f, g, h; vtype w[80], tmp1, tmp2; for (i = 0; i < 14; i += 2) { GATHER(tmp1, saved_key, i); GATHER(tmp2, saved_key, i + 1); vswap64(tmp1); vswap64(tmp2); w[i] = tmp1; w[i + 1] = tmp2; } GATHER(tmp1, saved_key, 14); vswap64(tmp1); w[14] = tmp1; GATHER(w[15], saved_key, 15); for (i = 16; i < 80; i++) R(i); a = vset1_epi64(0x6a09e667f3bcc908ULL); b = vset1_epi64(0xbb67ae8584caa73bULL); c = vset1_epi64(0x3c6ef372fe94f82bULL); d = vset1_epi64(0xa54ff53a5f1d36f1ULL); e = vset1_epi64(0x510e527fade682d1ULL); f = vset1_epi64(0x9b05688c2b3e6c1fULL); g = vset1_epi64(0x1f83d9abfb41bd6bULL); h = vset1_epi64(0x5be0cd19137e2179ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 0, 0x428a2f98d728ae22ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 1, 0x7137449123ef65cdULL); SHA512_STEP(g, h, a, b, c, d, e, f, 2, 0xb5c0fbcfec4d3b2fULL); SHA512_STEP(f, g, h, a, b, c, d, e, 3, 0xe9b5dba58189dbbcULL); SHA512_STEP(e, f, g, h, a, b, c, d, 4, 0x3956c25bf348b538ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 5, 0x59f111f1b605d019ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 6, 0x923f82a4af194f9bULL); SHA512_STEP(b, c, d, e, f, g, h, a, 7, 0xab1c5ed5da6d8118ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 8, 0xd807aa98a3030242ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 9, 0x12835b0145706fbeULL); SHA512_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be4ee4b28cULL); SHA512_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3d5ffb4e2ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74f27b896fULL); SHA512_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe3b1696b1ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a725c71235ULL); SHA512_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174cf692694ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c19ef14ad2ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786384f25e3ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc68b8cd5b5ULL); SHA512_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc77ac9c65ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f592b0275ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa6ea6e483ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dcbd41fbd4ULL); SHA512_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da831153b5ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152ee66dfabULL); SHA512_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d2db43210ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c898fb213fULL); SHA512_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7beef0ee4ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf33da88fc2ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 
29, 0xd5a79147930aa725ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351e003826fULL); SHA512_STEP(b, c, d, e, f, g, h, a, 31, 0x142929670a0e6e70ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a8546d22ffcULL); SHA512_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b21385c26c926ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc5ac42aedULL); SHA512_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d139d95b3dfULL); SHA512_STEP(e, f, g, h, a, b, c, d, 36, 0x650a73548baf63deULL); SHA512_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb3c77b2a8ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e47edaee6ULL); SHA512_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c851482353bULL); SHA512_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a14cf10364ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664bbc423001ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70d0f89791ULL); SHA512_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a30654be30ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819d6ef5218ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 45, 0xd69906245565a910ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e35855771202aULL); SHA512_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa07032bbd1b8ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116b8d2d0c8ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c085141ab53ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774cdf8eeb99ULL); SHA512_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5e19b48a8ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3c5c95a63ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4ae3418acbULL); SHA512_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f7763e373ULL); SHA512_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3d6b2b8a3ULL); SHA512_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee5defb2fcULL); SHA512_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f43172f60ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814a1f0ab72ULL); SHA512_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc702081a6439ecULL); SHA512_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa23631e28ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506cebde82bde9ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7b2c67915ULL); SHA512_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2e372532bULL); SHA512_STEP(a, b, c, d, e, f, g, h, 64, 0xca273eceea26619cULL); SHA512_STEP(h, a, b, c, d, e, f, g, 65, 0xd186b8c721c0c207ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 66, 0xeada7dd6cde0eb1eULL); SHA512_STEP(f, g, h, a, b, c, d, e, 67, 0xf57d4f7fee6ed178ULL); SHA512_STEP(e, f, g, h, a, b, c, d, 68, 0x06f067aa72176fbaULL); SHA512_STEP(d, e, f, g, h, a, b, c, 69, 0x0a637dc5a2c898a6ULL); SHA512_STEP(c, d, e, f, g, h, a, b, 70, 0x113f9804bef90daeULL); SHA512_STEP(b, c, d, e, f, g, h, a, 71, 0x1b710b35131c471bULL); SHA512_STEP(a, b, c, d, e, f, g, h, 72, 0x28db77f523047d84ULL); SHA512_STEP(h, a, b, c, d, e, f, g, 73, 0x32caab7b40c72493ULL); SHA512_STEP(g, h, a, b, c, d, e, f, 74, 0x3c9ebe0a15c9bebcULL); SHA512_STEP(f, g, h, a, b, c, d, e, 75, 0x431d67c49c100d4cULL); SHA512_STEP(e, f, g, h, a, b, c, d, 76, 0x4cc5d4becb3e42b6ULL); SHA512_STEP(d, e, f, g, h, a, b, c, 77, 0x597f299cfc657e2aULL); SHA512_STEP(c, d, e, f, g, h, a, b, 78, 0x5fcb6fab3ad6faecULL); SHA512_STEP(b, c, d, e, f, g, h, a, 79, 0x6c44198c4a475817ULL); vstore((vtype*) &crypt_key[0][index], a); vstore((vtype*) &crypt_key[1][index], b); vstore((vtype*) &crypt_key[2][index], c); vstore((vtype*) &crypt_key[3][index], d); vstore((vtype*) &crypt_key[4][index], e); vstore((vtype*) &crypt_key[5][index], f); vstore((vtype*) &crypt_key[6][index], g); vstore((vtype*) 
&crypt_key[7][index], h); } return count; } static int cmp_all(void *binary, int count) { int i; #ifdef _OPENMP for (i=0; i < count; i++) #else for (i=0; i < VWIDTH; i++) #endif if (((uint64_t *) binary)[0] == crypt_key[0][i]) return 1; return 0; } static int cmp_one(void *binary, int index) { return (((uint64_t *) binary)[0] == crypt_key[0][index]); } static int cmp_exact(char *source, int index) { int i; uint64_t *bin; bin = (uint64_t *) get_binary(source); for (i=1; i < 8; i++) if (((uint64_t *) bin)[i] != crypt_key[i][index]) return 0; return 1; } struct fmt_main fmt_rawSHA512_ng = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, MAXLEN, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP | FMT_OMP_BAD, { NULL }, { FORMAT_TAG, XSHA512_FORMAT_TAG, NSLDAP_FORMAT_TAG }, sha512_common_tests_rawsha512_111 }, { init, done, fmt_default_reset, fmt_default_prepare, sha512_common_valid, sha512_common_split, get_binary, // do not use the 'common' one. fmt_default_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* SIMD_COEF_32 */
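/* The MAXLEN comment near the top of this file derives 111 from SHA-512's
 * single-block layout: a 128-byte message block holds 16 eight-byte words,
 * the final 16 bytes are the FIPS-180 message-length field (this format
 * only ever writes the low word, buf64[15]), and one byte is consumed by
 * the mandatory 0x80 padding marker. A trivial standalone check: */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const int block        = 128; /* SHA-512 processes 128-byte blocks      */
	const int length_field = 16;  /* 128-bit length; only low word non-zero */
	const int pad_marker   = 1;   /* mandatory 0x80 byte                    */
	int maxlen = block - length_field - pad_marker;
	assert(maxlen == 111);        /* matches #define MAXLEN 111 above       */
	printf("single-block SHA-512 max plaintext: %d bytes\n", maxlen);
	return 0;
}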
nestedfn-5.c
/* { dg-do run } */ extern void abort (void); void foo (int *j) { int i = 5; int bar (void) { return i + 1; } #pragma omp sections { #pragma omp section { if (bar () != 6) #pragma omp atomic ++*j; } #pragma omp section { if (bar () != 6) #pragma omp atomic ++*j; } } } int main (void) { int j = 0; #pragma omp parallel num_threads (2) foo (&j); if (j) abort (); return 0; }
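/* The test above relies on a GNU C nested function reading its enclosing
 * frame from inside OpenMP sections. The sections construct itself simply
 * hands each section block to at most one thread, exactly once; a minimal
 * standalone sketch of that distribution (no nested functions involved): */
#include <stdio.h>

int main (void)
{
#pragma omp parallel num_threads (2)
#pragma omp sections
  {
#pragma omp section
    printf ("section A\n");   /* each section runs exactly once */
#pragma omp section
    printf ("section B\n");   /* possibly on a different thread */
  }
  return 0;
}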
mongodb_fmt_plug.c
/* Cracker for both MongoDB system and sniffed network hashes. Hacked together * during November of 2012 by Dhiru Kholia <dhiru at openwall.com>. * * Based on https://github.com/cyberpunkych/attacking_mongodb * * Hash format for MongoDB system hashes: user:$mongodb$0$user$hash * Hash format for MongoDB network hashes: user:$mongodb$1$user$salt$hash * * This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without modification, * are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_mongodb; #elif FMT_REGISTERS_H john_register_one(&fmt_mongodb); #else #include "md5.h" #include <string.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> #ifndef OMP_SCALE #ifdef __MIC__ #define OMP_SCALE 512 #else #define OMP_SCALE 16384 // Tuned on K8-dual HT #endif // __MIC__ #endif // OMP_SCALE #endif // _OPENMP #include "memdbg.h" #define FORMAT_LABEL "MongoDB" #define FORMAT_NAME "system / network" #define FORMAT_TAG "$mongodb$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 16 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN sizeof(uint32_t) #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests mongodb_tests[] = { {"$mongodb$0$sa$75692b1d11c072c6c79332e248c4f699", "sa"}, {"$mongodb$1$sa$58d3229c83e3f87e$0c85e3f74adce5d037426791940c820a", "sa"}, /* Ettercap generated test vectors */ {"$mongodb$1$sa$10441db416a99ffc$797d7e18879446845f10ae9d519960b2", "longpassword"}, {"$mongodb$1$longusername$86336266301fb552$1abe48bac6ad0bf567ab51b094f026a9", "longpassword"}, /* Ettercap fixed salt MiTM attack generated test vectors */ {"$mongodb$1$longusername$0000000000000000$53257e018399a241849cb04c70ba8daa", "longpassword"}, {"$mongodb$1$longusername$0000000000000000$10290925d16d81e50db242c9f3572d91", "longpassword@12345678"}, {"$mongodb$1$eight18_characters$8c82aec197929775$5c414259f7f7a42f8c4d1b6ffb37913a", "123"}, {"$mongodb$1$Herman$9b90cf265f3194d7$a5ca2c517c06fdfb773144d53fb26f56", "123456789"}, {"$mongodb$1$sz110$be8fa52f0e64c250$441d6ece7356c67dcc69dd26e7e0501f", "passWOrd"}, {"$mongodb$1$jack$304b81adddfb4d6f$c95e106f1d9952c88044a0b21a6bd3fd", ""}, // https://jira.mongodb.org/browse/SERVER-9476 {"$mongodb$1$z$ce88504553b16752$6deb79af26ebcdd2b2c40438008cb7b0", "g"}, // https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst {"$mongodb$1$user$2375531c32080ae8$21742f26431831d5cfca035a08c5bdf6", "pencil"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)]; static struct custom_salt { int type; unsigned char salt[17]; unsigned char username[128]; } *cur_salt; static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static int valid(char *ciphertext, 
struct fmt_main *self) { char *ptr, *ctcopy, *keeptr; int type, extra; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; if (!(ctcopy = strdup(ciphertext))) return 0; keeptr = ctcopy; ctcopy += FORMAT_TAG_LEN; if (!(ptr = strtokm(ctcopy, "$"))) /* type */ goto error; if (!isdec(ptr)) goto error; type = atoi(ptr); if (type != 0 && type != 1) goto error; if (!(ptr = strtokm(NULL, "$"))) /* username */ goto error; if (strlen(ptr) > 127) goto error; if (type == 0) { if (!(ptr = strtokm(NULL, "$"))) /* hash */ goto error; if (hexlenl(ptr, &extra) != 32 || extra) goto error; } else { if (!(ptr = strtokm(NULL, "$"))) /* salt */ goto error; if (hexlenl(ptr, &extra) != 16 || extra) goto error; if (!(ptr = strtokm(NULL, "$"))) /* hash */ goto error; if (hexlenl(ptr, &extra) != 32 || extra) goto error; } MEM_FREE(keeptr); return 1; error: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; static struct custom_salt cs; memset(&cs, 0, sizeof(cs)); ctcopy += FORMAT_TAG_LEN; /* skip over "$mongodb$" */ p = strtokm(ctcopy, "$"); cs.type = atoi(p); p = strtokm(NULL, "$"); strcpy((char*)cs.username, p); if (cs.type == 1) { p = strtokm(NULL, "$"); strcpy((char*)cs.salt, p); } MEM_FREE(keeptr); return (void *)&cs; } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; ARCH_WORD dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '$') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } inline static void hex_encode(unsigned char *str, int len, unsigned char *out) { int i; for (i = 0; i < len; ++i) { out[0] = itoa16[str[i]>>4]; out[1] = itoa16[str[i]&0xF]; out += 2; } } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { if (cur_salt->type == 0) { MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, ":mongo:", 7); MD5_Update(&ctx, saved_key[index], strlen(saved_key[index])); MD5_Final((unsigned char*)crypt_out[index], &ctx); } else { unsigned char hexout[32]; unsigned char out[32]; MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, ":mongo:", 7); MD5_Update(&ctx, saved_key[index], strlen(saved_key[index])); MD5_Final(out, &ctx); hex_encode(out, 16, hexout); MD5_Init(&ctx); MD5_Update(&ctx, cur_salt->salt, 16); MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username)); MD5_Update(&ctx, hexout, 32); MD5_Final((unsigned char*)crypt_out[index], &ctx); } } return count; } static int cmp_all(void *binary, int count) { int index = 0; #ifdef _OPENMP for (; index < count; index++) #endif if 
(!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void mongodb_set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } /* * report salt type as first "tunable cost" */ static unsigned int mongodb_salt_type(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->type; } struct fmt_main fmt_mongodb = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD, { "salt type", /* FIXME: report user name length as 2nd cost? */ }, { FORMAT_TAG }, mongodb_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { mongodb_salt_type, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, mongodb_set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
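/* The type-0 ("system") hashes this format cracks are, as crypt_all above
 * shows, just MD5(user ":mongo:" password). A standalone sketch reproducing
 * the first test vector with OpenSSL's one-shot MD5() (build with -lcrypto;
 * MD5() is deprecated in OpenSSL 3.0 but still available): */
#include <stdio.h>
#include <string.h>
#include <openssl/md5.h>

int main(void)
{
	const char *user = "sa", *pass = "sa";
	unsigned char msg[256], digest[MD5_DIGEST_LENGTH];
	int len = snprintf((char *) msg, sizeof(msg), "%s:mongo:%s", user, pass);
	MD5(msg, (size_t) len, digest);
	for (int i = 0; i < MD5_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]); /* 75692b1d11c072c6c79332e248c4f699 */
	printf("\n");
	return 0;
}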
GB_binop__max_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_int8) // A.*B function (eWiseMult): GB (_AemultB_08__max_int8) // A.*B function (eWiseMult): GB (_AemultB_02__max_int8) // A.*B function (eWiseMult): GB (_AemultB_04__max_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int8) // A*D function (colscale): GB (_AxD__max_int8) // D*A function (rowscale): GB (_DxB__max_int8) // C+=B function (dense accum): GB (_Cdense_accumB__max_int8) // C+=b function (dense accum): GB (_Cdense_accumb__max_int8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int8) // C=scalar+B GB (_bind1st__max_int8) // C=scalar+B' GB (_bind1st_tran__max_int8) // C=A+scalar GB (_bind2nd__max_int8) // C=A'+scalar GB (_bind2nd_tran__max_int8) // C type: int8_t // A type: int8_t // A pattern? 0 // B type: int8_t // B pattern? 0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_int8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int8_t alpha_scalar ; int8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int8_t *) alpha_scalar_in)) ; beta_scalar = 
(*((int8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_int8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int8_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_int8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_int8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_int8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
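/* Stripped-down sketch of what _bind1st__max_int8 above computes once the
 * GraphBLAS plumbing (bitmaps, iso handling, OpenMP scheduling) is removed:
 * GB_IMAX is the integer maximum, applied elementwise with the bound scalar
 * as the first operand. Array contents here are made up for illustration. */
#include <stdio.h>
#include <stdint.h>

static inline int8_t imax8(int8_t x, int8_t y) { return x > y ? x : y; }

int main(void)
{
	int8_t x = 3;                        /* bound first operand */
	int8_t Bx[] = { -5, 0, 3, 7, -128 }; /* dense B, no bitmap  */
	int8_t Cx[5];
	for (int p = 0; p < 5; p++)
		Cx[p] = imax8(x, Bx[p]);     /* Cx = max(x, Bx)     */
	for (int p = 0; p < 5; p++)
		printf("%d ", Cx[p]);        /* 3 3 3 7 3           */
	printf("\n");
	return 0;
}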
convoaa.c
/* Copyright 2015. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2012 Martin Uecker uecker@eecs.berkeley.edu */ #include <assert.h> #include "misc/misc.h" #include "num/multind.h" #include "num/flpmath.h" #include "num/conv.h" #include "num/vecops.h" #include "convoaa.h" void overlapandadd(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2) { long ndims[2 * N]; long L[2 * N]; long ndim2[2 * N]; long ndim3[2 * N]; for (int i = 0; i < N; i++) { assert(0 == dims[i] % blk[i]); assert(dim2[i] <= blk[i]); ndims[i * 2 + 1] = dims[i] / blk[i]; ndims[i * 2 + 0] = blk[i]; L[i * 2 + 1] = dims[i] / blk[i]; L[i * 2 + 0] = blk[i] + dim2[i] - 1; ndim2[i * 2 + 1] = 1; ndim2[i * 2 + 0] = dim2[i]; ndim3[i * 2 + 1] = dims[i] / blk[i] + 1; ndim3[i * 2 + 0] = blk[i]; } complex float* tmp = md_alloc(2 * N, L, CFL_SIZE); // conv_causal_extend(2 * N, L, tmp, ndims, src1, ndim2, src2); conv(2 * N, ~0, CONV_EXTENDED, CONV_CAUSAL, L, tmp, ndims, src1, ndim2, src2); // [------++++|||||||| //long str1[2 * N]; long str2[2 * N]; long str3[2 * N]; //md_calc_strides(2 * N, str1, ndims, 8); md_calc_strides(2 * N, str2, L, 8); md_calc_strides(2 * N, str3, ndim3, 8); md_clear(2 * N, ndim3, dst, CFL_SIZE); md_zadd2(2 * N, L, str3, dst, str3, dst, str2, tmp); md_free(tmp); } void overlapandsave(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2) { // [------++++ // [------ long ndims[2 * N]; long L[2 * N]; long ndim2[2 * N]; long ndim3[2 * N]; for (int i = 0; i < N; i++) { assert(0 == dims[i] % blk[i]); assert(dim2[i] <= blk[i]); ndims[i * 2 + 1] = dims[i] / blk[i]; ndims[i * 2 + 0] = blk[i]; L[i * 2 + 1] = dims[i] / blk[i]; L[i * 2 + 0] = blk[i] + dim2[i] - 1; ndim2[i * 2 + 1] = 1; ndim2[i * 2 + 0] = dim2[i]; ndim3[i * 2 + 1] = dims[i] / blk[i] - 0; ndim3[i * 2 + 0] = blk[i]; } complex float* tmp = md_alloc(2 * N, L, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; long str3[2 * N]; md_calc_strides(2 * N, str1, ndims, 8); md_calc_strides(2 * N, str2, L, 8); md_calc_strides(2 * N, str3, ndim3, 8); md_clear(2 * N, L, tmp, 8); md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8); conv(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, dst, L, tmp, ndim2, src2); md_free(tmp); } #if 0 struct conv_plan* overlapandsave_plan(int N, const long dims[N], const long blk[N], const long dim2[N], complex float* src2) { return conv_plan(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, L, ndim2, src2); } void overlapandsave_exec(struct conv_plan* plan, int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N]) { md_clear(2 * N, L, tmp, 8); md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8); conv_exec(plan, dst, tmp); free(tmp); } #endif void overlapandsave2(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); 
assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks? ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); md_resize_center(N, dims1B, src1B, dims1, src1, CFL_SIZE); complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, CFL_SIZE); md_calc_strides(2 * N, str2, tdims, CFL_SIZE); long off = md_calc_offset(2 * N, str1, shift); md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, CFL_SIZE); md_free(src1B); conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2); md_free(tmp); } void overlapandsave2H(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks? 
ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); //conv(2 * N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2); convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, src1, ndims2, src2); complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, CFL_SIZE); md_calc_strides(2 * N, str2, tdims, CFL_SIZE); long off = md_calc_offset(2 * N, str1, shift); md_clear(N, dims1B, src1B, CFL_SIZE); //md_copy2(2 * N, tdims, str1, ((void*)src1B) + off, str2, tmp, sizeof(complex float));// FIXME: md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp); md_resize_center(N, dims1, dst, dims1B, src1B, CFL_SIZE); md_free(src1B); md_free(tmp); } void overlapandsave2NE(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], complex float* src1, const long dims2[N], complex float* src2, const long mdims[N], complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks? 
ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); complex float* tmpX = md_alloc(N, odims, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, sizeof(complex float)); md_calc_strides(2 * N, str2, tdims, sizeof(complex float)); long off = md_calc_offset(2 * N, str1, shift); md_resize_center(N, dims1B, src1B, dims1, src1, sizeof(complex float)); // we can loop here md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, sizeof(complex float)); conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2); long ostr[N]; long mstr[N]; md_calc_strides(N, ostr, odims, sizeof(complex float)); md_calc_strides(N, mstr, mdims, sizeof(complex float)); md_zmul2(N, odims, ostr, tmpX, ostr, tmpX, mstr, msk); convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); md_clear(N, dims1B, src1B, sizeof(complex float)); md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp); // md_resize_center(N, dims1, dst, dims1B, src1B, sizeof(complex float)); md_free(src1B); md_free(tmpX); md_free(tmp); } void overlapandsave2NEB(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims2[2 * N]; long nmdims[2 * N]; int e = N; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); assert((1 == mdims[i]) || (mdims[i] == dims1[i])); // blocked output nodims[e] = odims[i] / blk[i]; nodims[i] = blk[i]; // expanded temporary storage tdims[e] = dims1[i] / blk[i]; tdims[i] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + if (1 == mdims[i]) { nmdims[2 * i + 1] = 1; nmdims[2 * i + 0] = 1; } else { nmdims[2 * i + 1] = mdims[i] / blk[i]; nmdims[2 * i + 0] = blk[i]; } // resized input // minimal padding dims1B[i] = dims1[i] + (dims2[i] - 1); // kernel ndims2[e] = 1; ndims2[i] = dims2[i]; e++; } else { nodims[i] = odims[i]; tdims[i] = dims1[i]; nmdims[2 * i + 1] = 1; nmdims[2 * i + 0] = mdims[i]; dims1B[i] = dims1[i]; ndims2[i] = dims2[i]; } } int NE = e; //long S = md_calc_size(N, dims1B, 1); long str1[NE]; long str1B[N]; md_calc_strides(N, str1B, dims1B, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { str1[i] = str1B[i]; if (MD_IS_SET(flags, i)) str1[e++] = str1B[i] * blk[i]; } assert(NE == e); long str2[NE]; md_calc_strides(NE, str2, tdims, sizeof(complex float)); long ostr[NE]; long mstr[NE]; long mstrB[2 * N]; md_calc_strides(NE, ostr, nodims, sizeof(complex float)); md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { mstr[i] = mstrB[2 * i +
0]; if (MD_IS_SET(flags, i)) mstr[e++] = mstrB[2 * i + 1]; } assert(NE == e); const complex float* src1B = src1;//! //complex float* src1B = xmalloc(S * sizeof(complex float)); //md_resizec(N, dims1B, src1B, dims1, src1, sizeof(complex float)); // we can loop here assert(NE == N + 3); assert(1 == ndims2[N + 0]); assert(1 == ndims2[N + 1]); assert(1 == ndims2[N + 2]); assert(tdims[N + 0] == nodims[N + 0]); assert(tdims[N + 1] == nodims[N + 1]); assert(tdims[N + 2] == nodims[N + 2]); //complex float* src1C = xmalloc(S * sizeof(complex float)); complex float* src1C = dst; md_clear(N, dims1B, src1C, sizeof(complex float)); // must be done here #pragma omp parallel for collapse(3) for (int k = 0; k < nodims[N + 2]; k++) { for (int j = 0; j < nodims[N + 1]; j++) { for (int i = 0; i < nodims[N + 0]; i++) { complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst); complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst); long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k; long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k; md_copy2(N, tdims, str2, tmp, str1, ((const void*)src1B) + off1, sizeof(complex float)); conv(N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2); md_zmul2(N, nodims, ostr, tmpX, ostr, tmpX, mstr, ((const void*)msk) + off2); convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); #pragma omp critical md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp); md_free(tmpX); md_free(tmp); }}} //md_resizec(N, dims1, dst, dims1B, src1C, sizeof(complex float)); //free(src1C); //free(src1B); } void overlapandsave2HB(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims2[2 * N]; long nmdims[2 * N]; int e = N; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); assert((1 == mdims[i]) || (mdims[i] == dims1[i])); // blocked output nodims[e] = odims[i] / blk[i]; nodims[i] = blk[i]; // expanded temporary storage tdims[e] = dims1[i] / blk[i]; tdims[i] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + if (1 == mdims[i]) { nmdims[2 * i + 1] = 1; nmdims[2 * i + 1] = 1; } else { nmdims[2 * i + 1] = mdims[i] / blk[i]; nmdims[2 * i + 0] = blk[i]; } // resized input // minimal padding dims1B[i] = dims1[i] + (dims2[i] - 1); // kernel ndims2[e] = 1; ndims2[i] = dims2[i]; e++; } else { nodims[i] = odims[i]; tdims[i] = dims1[i]; nmdims[2 * i + 1] = 1; nmdims[2 * i + 0] = mdims[i]; dims1B[i] = dims1[i]; ndims2[i] = dims2[i]; } } int NE = e; // long S = md_calc_size(N, dims1B, 1); long str1[NE]; long str1B[N]; md_calc_strides(N, str1B, dims1B, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { str1[i] = str1B[i]; if (MD_IS_SET(flags, i)) str1[e++] = str1B[i] * blk[i]; } assert(NE == e); long str2[NE]; md_calc_strides(NE, str2, tdims, sizeof(complex float)); long ostr[NE]; long mstr[NE]; long mstrB[2 * N]; md_calc_strides(NE, ostr, nodims, sizeof(complex float)); md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { 
mstr[i] = mstrB[2 * i + 0]; if (MD_IS_SET(flags, i)) mstr[e++] = mstrB[2 * i + 1]; } assert(NE == e); // we can loop here assert(NE == N + 3); assert(1 == ndims2[N + 0]); assert(1 == ndims2[N + 1]); assert(1 == ndims2[N + 2]); assert(tdims[N + 0] == nodims[N + 0]); assert(tdims[N + 1] == nodims[N + 1]); assert(tdims[N + 2] == nodims[N + 2]); //complex float* src1C = xmalloc(S * sizeof(complex float)); complex float* src1C = dst; md_clear(N, dims1B, src1C, CFL_SIZE); // must be done here #pragma omp parallel for collapse(3) for (int k = 0; k < nodims[N + 2]; k++) { for (int j = 0; j < nodims[N + 1]; j++) { for (int i = 0; i < nodims[N + 0]; i++) { complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst); complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst); long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k; long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k; long off3 = ostr[N + 0] * i + ostr[N + 1] * j + ostr[N + 2] * k; md_zmul2(N, nodims, ostr, tmpX, ostr, ((const void*)src1) + off3, mstr, ((const void*)msk) + off2); convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); #pragma omp critical md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp); md_free(tmpX); md_free(tmp); }}} }
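/*
 * Note on the routines above: they implement a blocked (overlap-and-save)
 * evaluation of masked convolutions. The input is cut into blocks of size
 * blk[i], each block is extended by dims2[i] - 1 samples of overlap with
 * its neighbors, convolved in valid mode, multiplied by the mask, and the
 * adjoint convolution of each block is accumulated back. The sketch below
 * shows the same blocking idea for a plain 1-d real-valued convolution; it
 * is an illustration only, and the names overlap_and_save_1d and
 * conv_valid_block are hypothetical, not part of this file or the md_* API.
 */
#include <stdlib.h>

static void conv_valid_block(const float* in, const float* kernel, float* out, int blk, int k)
{
	// in holds blk + k - 1 samples; out receives the blk "valid" outputs
	for (int i = 0; i < blk; i++) {

		float acc = 0.f;

		for (int j = 0; j < k; j++)
			acc += in[i + j] * kernel[k - 1 - j];

		out[i] = acc;
	}
}

void overlap_and_save_1d(int n, int blk, int k, float* out, const float* in, const float* kernel)
{
	// assumes 0 == n % blk, mirroring the asserts in the routines above
	float* buf = malloc(sizeof(float) * (blk + k - 1));

	for (int b = 0; b < n / blk; b++) {

		// copy the block plus k - 1 samples of left overlap (zero-padded at the border)
		for (int i = 0; i < blk + k - 1; i++) {

			int idx = b * blk + i - (k - 1);
			buf[i] = ((idx >= 0) && (idx < n)) ? in[idx] : 0.f;
		}

		conv_valid_block(buf, kernel, out + b * blk, blk, k);
	}

	free(buf);
}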
cq_fmt_plug.c
/* * This software is Copyright (c) Peter Kasza <peter.kasza at itinsight.hu>, * and it is hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_cq; #elif FMT_REGISTERS_H john_register_one(&fmt_cq); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #define OMP_SCALE 2048 // XXX #endif #include "arch.h" #include "misc.h" #include "params.h" #include "common.h" #include "formats.h" #include "options.h" #include "memdbg.h" #define FORMAT_LABEL "cq" #define FORMAT_NAME "ClearQuest" #define FORMAT_TAG "$cq$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "CQWeb" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 32 #define SALT_SIZE 64 // XXX double check this #define SALT_ALIGN MEM_ALIGN_NONE #define BINARY_SIZE 4 #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static char saved_salt[SALT_SIZE]; unsigned int AdRandomNumbers[2048] = { 0x7aa03f9e, 0x2be5e9c7, 0x1b5ceb7b, 0x32243048, 0x3cb12e04, 0xe90d2e8f, 0xace8842a, 0xdbc021e2, 0xdb7e4414, 0x9414168d, 0xec94d186, 0xb0d45b52, 0xefa8a505, 0xee4ac734, 0xee7f3583, 0xf37a1bd0, 0x258cd1c7, 0xa93a5bf7, 0x347a23f6, 0xbf68b272, 0x5c89e744, 0x4faa8fc0, 0x54fa4bc1, 0x8f4db7cc, 0xcc78a54c, 0x84012379, 0xbb997725, 0x234612c5, 0x9b8a7120, 0xa2ea15c9, 0xa1b03515, 0xd68c512c, 0x90cbbcb0, 0xa677c1d3, 0xad4edf73, 0x0fbb4c6c, 0x7a70637d, 0x920c3ef4, 0xc31f9edf, 0xc0c29c40, 0xe0547468, 0x8af5778a, 0x4910f9e4, 0x553744df, 0xe9e5d82f, 0x9f648de3, 0xc366b6e0, 0xd2b1f83d, 0xca04733f, 0xcf3603b1, 0x27a7e70f, 0x9981f60a, 0xdd4e2cf8, 0xbcb811b3, 0x5d74ecc1, 0xa48e7106, 0x16539a54, 0xbdce7967, 0xf08c13d2, 0x2698c222, 0x3343d62d, 0xebf68da7, 0xc2bc9948, 0xea757b40, 0x37f0de67, 0xa03a4d89, 0x2cfc3cb7, 0x89230393, 0x6711f91d, 0x3812fb31, 0x9a105ad2, 0xf713d767, 0x4bd0d6c5, 0x4e4f9760, 0x9144e67b, 0x2b5b1540, 0xed6f8cd3, 0x1ab6de1d, 0x94fd67ae, 0x5fec29f0, 0x9d2f6c66, 0x701797be, 0x7ecd184e, 0x0e58eba6, 0x189fd557, 0xe70a447f, 0x3140d5ce, 0x46d55d28, 0xf77cb54a, 0x2eb11f3e, 0x331059a8, 0x1bcb4f0e, 0x253b1132, 0x9ed96195, 0xfadee553, 0x60939883, 0x76ea1724, 0x64ad91a9, 0xce605783, 0x5b17c228, 0xbb45f357, 0xee888899, 0xe36bda7e, 0xb1e6fdb6, 0xd47b22cb, 0xba2b4b6f, 0x31eb5d46, 0x72865ff5, 0xecc9077f, 0xe59c7a9a, 0xe2484234, 0x6c250954, 0x75bae5d1, 0x7d8ffc5c, 0x14aa2302, 0x0ea92748, 0xfc9fe3aa, 0x28295969, 0x2f38760d, 0xd335cab1, 0x7ffcaead, 0xcdeb00fc, 0x5aa4766f, 0xc57deab9, 0xfbeb9ac4, 0xfc59f737, 0xdc8a3d1d, 0xd5a04c8a, 0x63afc85c, 0x5a062866, 0x7feadb01, 0x9d29586e, 0x4bc63a4e, 0xc2e7a653, 0x80d132b7, 0x306d04e9, 0x27b4e1df, 0x2d4e5f39, 0xbc7f2f05, 0x8efb7bc4, 0x834368e0, 0xe37ac5c2, 0x07f7741c, 0x7d70f32b, 0xd8fc568c, 0x52e86793, 0x8a95993e, 0xc086bd26, 0xa0d0c8f9, 0x2262519a, 0xc7a79adc, 0x658ee58a, 0x94376223, 0x631fbdc3, 0x3433f9b0, 0xe469e054, 0x7b187772, 0x6ea94b34, 0x88515df2, 0xaa7c0e2b, 0xd688308e, 0x58f4d8de, 0xb3df5108, 0xa977f53d, 0x2f8bf273, 0x98c61997, 0x4c4f7cbc, 0x81a5a959, 0x8b38b887, 0x17c0d440, 0x03459240, 0x0e049148, 0xfa28dda2, 0xa364ab5b, 0x1f779ce8, 0x9013690f, 0xbf9b1284, 0x7a1704ac, 0x9deb5e71, 0x1b97bc67, 0x8560a7c3, 0xf95dc105, 0x5be1c16f, 0xa6f93b31, 0x9d540073, 0xbe36f269, 0xec1ad083, 0xb0d82bf0, 0xa362120c, 0x5d40e591, 0x4383ee00, 0xfc2bd2b3, 0x03415c36, 
0x3514e8f4, 0x39fce373, 0xd8b0083c, 0xfaaf9375, 0xad0cf95b, 0x7618f7bd, 0xa1172c4f, 0xe4d6fc97, 0xd0d7d41c, 0x665d7461, 0x05a9ffd1, 0xdd7380d8, 0x2c334bab, 0xe89378a6, 0xe1b5a151, 0xb3d52dec, 0x3298d083, 0x2ef54a8c, 0x06f48c54, 0x34172d75, 0xee7a4a5b, 0x0bcbb2c4, 0xedf58b49, 0x8bbc49f3, 0x3c5c2876, 0xf8faa9d2, 0xd5c27925, 0x8b31e871, 0xce021186, 0xff86fbb1, 0xce65d1a6, 0x2ddc62f1, 0x4be585e1, 0x69554205, 0x4a4144f0, 0xd8658572, 0x7ecdf4e5, 0xeca38dba, 0xba099ee0, 0x9b776c4e, 0x406aa6c7, 0x1b2920d2, 0x20a4fe17, 0x7eefab23, 0xf6cedf61, 0x580739bc, 0xdb0d8ee2, 0xbcceef67, 0xbfeaeedf, 0x396c4060, 0xccb400cc, 0x2e411cbb, 0x671672e1, 0x25a6139b, 0x9ee52aa1, 0x7494b0f1, 0xd967d261, 0x7c3351f6, 0xcb1b564c, 0x3ebd0cbf, 0x1e451dcb, 0x197de7e2, 0xb988e765, 0x4a56963b, 0x1c1c8571, 0xc77711b8, 0x56a92f0e, 0xb480334e, 0x283314d6, 0xa2905c9a, 0xfb7a96f1, 0x033f43a9, 0xea71059a, 0x266c4710, 0x4528c417, 0x6f3a6608, 0x45699d70, 0xd72db23a, 0x411dfdeb, 0xed52ad3d, 0x9c1cd588, 0x15e8e29d, 0xe36911aa, 0x2e65957b, 0x085d2e03, 0x419ee083, 0x0b257fb9, 0x30aa0a12, 0xdc7358a7, 0xaa9d75b8, 0xcc040ac1, 0x178035c6, 0x53916d0e, 0x54f00e79, 0xf5de4034, 0xd66ff28b, 0x77fd3f74, 0x512ca7d5, 0xd6e48b64, 0xbeb2e6e6, 0xfbbc9e4d, 0xfae7f661, 0x8d529091, 0xd43abeab, 0xc445eab2, 0x31c276ea, 0x3b02aa3c, 0x104555ae, 0xf31fca4c, 0x7c86bda3, 0xbb8fa8e1, 0x561ef23b, 0xc7c3ddac, 0xc049a3b2, 0xb55ce3c1, 0x793c8199, 0xaf9cf13b, 0xd1bedce3, 0x5c9d8d47, 0x975973cd, 0xcc747711, 0xecc9b8cd, 0xc38edb0e, 0x10d46466, 0xd1742665, 0x34de2abc, 0x6af0415f, 0x33eaabe7, 0xeb5b3c88, 0xe45dbb41, 0x46a60558, 0xec3ee7fb, 0x8648812f, 0x779004e2, 0x957369ad, 0x2859ec9e, 0xd7500913, 0x7ffdbaff, 0x653c7581, 0xcb2fbe4e, 0x889a0521, 0xc2c47e53, 0x27357d53, 0x6f8e3752, 0x14fed547, 0x78f92e55, 0x12168b52, 0x0cecbc88, 0xadc37f82, 0x5c65b902, 0x464adda3, 0x51701de4, 0x94f581be, 0x3a8bfe19, 0xd928e9fc, 0x4e621fd7, 0x463c7b1a, 0xa5d610c6, 0x5225860f, 0x31f71d52, 0x59da3e38, 0xf979401f, 0x457831b6, 0x19b19ff9, 0x212898b8, 0x9d9a330d, 0x9cbbc514, 0xb1b28903, 0x92161213, 0xcd13324f, 0xa7d072ff, 0x314eb9e8, 0xbf0981ce, 0x00212756, 0xa4aa1438, 0x4b8a6088, 0x35cfaec8, 0xdd1def33, 0x7a868772, 0x43faf1be, 0x7140cda5, 0x86a8ccf2, 0xaa95fc06, 0x19eaaee0, 0x309e6b2a, 0x6943426b, 0x2c687aca, 0x9aa1e0dd, 0x5bf701cd, 0x129364b8, 0xeac81cbf, 0x3a1193ec, 0xd224cb9d, 0x0bbd5135, 0x82eb9c9d, 0x3c965dbf, 0xf6779412, 0xc71f63da, 0x28eedaa1, 0x63a4f5ba, 0x07cdd4e0, 0x072faeff, 0x21ff534d, 0x0d35cc3f, 0x4e0d4e2e, 0x75851a2f, 0x2e6779f2, 0xda7a104a, 0xbf129c30, 0x8b6305e9, 0x8d5bd156, 0xc3143454, 0x01f988a1, 0x55930029, 0xfc2f1619, 0xc4f9f598, 0x60c2ca05, 0x0c7138d9, 0xb3a34e45, 0x2f228b1a, 0x8ce79f7b, 0x0433eefb, 0x0bba45cc, 0xd9b8abdc, 0x77798336, 0xd46b5757, 0xf3f4308a, 0x8e4984c1, 0x18d3dff2, 0x43cd6cbb, 0x475cab00, 0xa0baa888, 0x7d688498, 0x33975596, 0xa8e3b619, 0x8258d065, 0x1e939418, 0xae47dcaf, 0x56a1311b, 0x41650078, 0x9ca8280b, 0x2df5bb50, 0x8ca64fba, 0x840e8608, 0x0ea9643d, 0x64666095, 0x76ae3ba8, 0x31409202, 0x5c076878, 0xa15154f5, 0xa33ad25c, 0x5d76ea5e, 0x89d98e0c, 0x7d0888c2, 0xce8ea846, 0x5422cb9c, 0x4ed2233e, 0x7a956e89, 0x9f132d5d, 0xb21ecaf1, 0xaae3eeb0, 0x5b6ca2c8, 0x4dc9ee83, 0x970b1474, 0x990ccca3, 0x223acb1d, 0x8136038e, 0xd3f3f287, 0x869f34e7, 0x11966837, 0x9952dab8, 0xdb3077d8, 0x9fe3cdd8, 0x892915cf, 0x658ae998, 0x6fa07e4a, 0xe28746ed, 0xa7821e30, 0x59d88b18, 0x1d1a7922, 0xe19a82ec, 0xd7e17b1b, 0x4d77aaee, 0x954c1f25, 0xad312a39, 0xb2fc818a, 0x29d17df3, 0x498f9862, 0xbba03635, 0xa4be43d1, 0x712e234c, 0xee3d9921, 0x87d1e33d, 0x8606d3cf, 0xc252595e, 0x6dd206de, 
0xd381f5bb, 0xc0d9420f, 0x459e1991, 0x0c1892b8, 0xc79fd935, 0x416cdabe, 0xb57690a6, 0x087ea862, 0x763aff63, 0x15337abd, 0x6455d3f6, 0x524034c7, 0x7bb42336, 0x41669b3f, 0x26574372, 0xbc949e2e, 0x211af8fb, 0x7416c18e, 0x6c7c01c8, 0x20edce1e, 0xf858ab54, 0x2cde56af, 0xd4a31e32, 0xb6947234, 0xa8694d27, 0x90f90227, 0x257413b1, 0xacd234a4, 0x17e19de7, 0x71b63409, 0x8d67fb39, 0x0217d598, 0xcb548c2d, 0xce3c13da, 0x2d6c3984, 0xcc3c93db, 0x6d10ae7c, 0x893b3eb1, 0xcf58d7d6, 0x8038f673, 0xf5d60b09, 0x85c6ea1c, 0xbe121052, 0x4519c0cd, 0xe4032569, 0xc0a8e700, 0x2a72e706, 0x8534a64f, 0x9f4a1361, 0xd4ef416b, 0x43336420, 0x323e2a96, 0xd5ea02c9, 0x59782e1b, 0x4c4e02e2, 0x75171d80, 0x6aa8fba1, 0xfdc8233c, 0x36eee3c0, 0x18643046, 0xce2d3fe4, 0x73ed0154, 0xfc8dfea4, 0xd43c9f90, 0x2bbdf9fa, 0x0dd43b40, 0x74c54101, 0xd4dd41ed, 0xf06a79a5, 0xb85687ae, 0x5f85dfbd, 0xd630cc07, 0xd7105cd7, 0x46f30505, 0x15575a70, 0x1b3dff40, 0x24a9e552, 0xcc52f6c4, 0xddca62fe, 0x094da898, 0xbbf990e8, 0xd2531586, 0x79a6abe4, 0xd67bac65, 0xaf2e08a7, 0x36c28310, 0x85f3fb0a, 0x188a8332, 0x7ed37c9c, 0x9fd8d6c6, 0xba87d2eb, 0x0331d15f, 0x11438530, 0x86848311, 0x81b555f1, 0xf582213f, 0x1b39290e, 0x644ed913, 0x1a15eef2, 0x30df2149, 0x9aee5345, 0xdaf33a78, 0x1a36f066, 0xbc6d937e, 0xf0553748, 0x3c4d51a4, 0xac14c751, 0xc691eb3a, 0xd08c9cde, 0x93a2716c, 0xd46bddb8, 0x86504249, 0xd331a584, 0xbb0a34b0, 0xe095b710, 0x637512a4, 0x1b4b520d, 0xd51e5b11, 0x561b6ea5, 0xf1d870ab, 0xed478d20, 0x63e61e6d, 0xf159f48a, 0xc5e9d72c, 0x5c5ce2e0, 0x65f1e7a6, 0x3d98331a, 0x54596f93, 0x98a1fcef, 0xd603f018, 0xdf0a0fe9, 0x3133cc21, 0x1f66fa5a, 0x34d24f08, 0x3f6ffbdc, 0x8f5a7faf, 0xb6f7f344, 0x44e8f0fc, 0xb84d7ede, 0x6a193aa5, 0xd7846b26, 0xdc93b0fe, 0x8eb61507, 0x57502a82, 0xd0068b11, 0xbfc1a056, 0x5766badc, 0xcd80e8cb, 0x3ad80ff6, 0x6b07c122, 0x2fc821a0, 0x924b7f93, 0x0cb37099, 0x8f078060, 0x74ea3ad4, 0x12006e5d, 0x513db72f, 0x9ed5dc6d, 0x0f8763e1, 0xd3746c15, 0x69043f07, 0x2f1f0e29, 0x1e134f46, 0x1b673ace, 0x352869fa, 0xb903f872, 0xf44e83a9, 0x0358ba50, 0xad94f27d, 0xe3799bc7, 0x1aa584be, 0xbf6496e8, 0x020edea1, 0x8f9f1e56, 0x335d8aad, 0xb52f8536, 0x1047d008, 0x705fc7c5, 0x82a15021, 0x38ec73c5, 0x83bbeec3, 0xafe5dfad, 0x014d9399, 0x132c1fdf, 0x90781f7f, 0x9fdf14ca, 0x4f9e619e, 0xcffb2139, 0xd868773d, 0x53ab6332, 0xc7139a87, 0x069c6fa1, 0xaea8ddc8, 0xebf10a04, 0x594a74b2, 0xaf80e4a8, 0x4ea207a5, 0x0017f466, 0x780f7b67, 0x35b7e34c, 0xd5c9b942, 0x55096faf, 0xf46db3d1, 0x4ab94cc8, 0x2b69bb09, 0xd95f0cbc, 0x97694c99, 0xd1cfe216, 0xb21e227d, 0xea436ce4, 0xbd06f5f2, 0x69b7ed73, 0xdc368c1b, 0x6bc96d16, 0x00af13db, 0x33bac433, 0xe3b28392, 0x985ce7fa, 0x49981994, 0x3acd1335, 0x8a72de1d, 0x41500863, 0xa2eba987, 0x1928c7d1, 0xa599918a, 0x9b8ff4d9, 0x698b190b, 0xfdb34c56, 0x53fb5413, 0x4879c088, 0x3bef71a7, 0x54f32abc, 0xe3fe3908, 0xa8b79d24, 0x50f69baf, 0xe87713e1, 0xceeaa849, 0xb5054c6e, 0x99076ad6, 0x4cecff0b, 0x21880bcb, 0x78b035b6, 0x2ba89560, 0x82d35f09, 0xeb2456ed, 0xc404c4b3, 0xd88f0c49, 0xd07cb42f, 0x37c1d01d, 0xb81701af, 0x1b435610, 0x6945f3b8, 0xd1d51325, 0x1b89be74, 0x67fb8124, 0x48f5ee8e, 0x80181ba7, 0x10817a0c, 0x931890cb, 0xdd21311d, 0x8530cda9, 0x8e31ca20, 0x3bc790ba, 0xbb769f75, 0xd87d8134, 0x1e586f4e, 0x0afef375, 0x0756d640, 0x7e89338e, 0x7ffd1904, 0x83dbd0c7, 0xcfd2f15b, 0xfd2675d6, 0xab8cd735, 0x6c95ef68, 0x4e713995, 0x90835487, 0x193af1c9, 0xfe13ce8f, 0x82beba7f, 0x8a9c42b3, 0x44635fa6, 0xb5b71efc, 0x12b4e48a, 0xf7af1338, 0xd1ac39a8, 0xbacc03c1, 0xf24f7267, 0x8e0f9e90, 0x189cabb4, 0x3c8019a1, 0x4abecfee, 0x9d972bc8, 0xf5e931b0, 0xc26b7de1, 0x1d56f5d3, 
0x77079560, 0x04ae0fac, 0x03220a78, 0xe0b95f42, 0x92d112ce, 0x2bd2afad, 0x80755047, 0x41a23ec9, 0xb49f602c, 0x3761a3f6, 0x935ee1cf, 0x1b64abe7, 0x006e19a9, 0x0b720985, 0xd8155ad3, 0x15539fef, 0xe2a32706, 0x86ac3c63, 0x5b606e14, 0x529e4a4e, 0x5d862f33, 0x68db23a7, 0x276819b7, 0x8c1e1a2d, 0xb8bdd15a, 0x7d0eb98e, 0xd3301dee, 0xeddd8b74, 0xb73dad26, 0xded6a141, 0x1f233c79, 0x324da954, 0x3eb44363, 0x530f5631, 0x60aa4163, 0xa0a53376, 0x46447217, 0x21c4a239, 0xf2813423, 0x72ec278c, 0x87b8bf19, 0x630ff66d, 0xd83cf3a0, 0x9f889ff6, 0xb99fc512, 0xce97a6ac, 0x018016d3, 0xb6c29965, 0xc06b4718, 0x837bdedb, 0x4cf09f8e, 0xe9dc4763, 0x7696df71, 0x03182a80, 0x4437d055, 0xea83a340, 0x749af1a9, 0xab377e67, 0x9c568a33, 0x15a7cce4, 0x7a0a58e1, 0xfa221945, 0x55bf880c, 0x31607015, 0xb3780687, 0x5cb9bd63, 0xfec69f6b, 0xd2e21926, 0x8b3dad02, 0x4f3b41bc, 0x75e0e8cc, 0x10589810, 0x5729ba38, 0xb0a588e1, 0x7c821c0a, 0xbc0075a0, 0x3ab20949, 0xaff6b176, 0xc8497f0b, 0x1871d8dc, 0x1ff4a370, 0x824bcf3b, 0x6eca58a6, 0xa009169f, 0xf610f928, 0x23a867df, 0x383522b8, 0xc5f83a93, 0x2c3cd773, 0x55dbf231, 0x7c64920b, 0x5ef0d8fa, 0x31db2bfe, 0x45439b02, 0x230e546f, 0xc48477fc, 0x7e83db3c, 0x989f9765, 0x36d109b3, 0x22d9ae17, 0x5b19aa54, 0xec1161f1, 0x4abd5b16, 0xf3264a50, 0x0488b52e, 0x3e0fba91, 0xd69b929c, 0x083c42ee, 0x4c37fc2d, 0xada375ec, 0xf4f80d69, 0xe645063a, 0xa9aa2bf5, 0xfb3169fb, 0xc2a162ab, 0x794a6f5b, 0x68838c2c, 0xf40b2c40, 0x4ef814f1, 0x4f847bc0, 0x5333a90e, 0x9b3f17a3, 0xa9af089f, 0xe1991375, 0x3f7384b6, 0x1fb942c7, 0x97365bba, 0xd3b6523a, 0x2d76c616, 0x5d7b944c, 0x9dffc0d9, 0x6ef0ca09, 0x745d606e, 0xc25f0d1d, 0x68335b59, 0xa5a9aa32, 0x5f609d6f, 0x58012e9c, 0xc138c974, 0x0c62589a, 0x54c7026b, 0x78576d4f, 0x7ab860fe, 0x673810e2, 0x3cd73808, 0xa20364a5, 0xc750409a, 0xaff51879, 0xa4a598b0, 0xc4f113e9, 0x28da785e, 0xf41e4752, 0xf0608ba0, 0xa3434cf2, 0x5591372b, 0x4aa08a13, 0x0ec92e82, 0x714a0880, 0x643b37f1, 0x8a2bb7a2, 0x7db8db33, 0x6f800d7f, 0xce583d48, 0x07517baf, 0xdc1d9e7d, 0xab9a13cd, 0x6f2614d0, 0xbf021e62, 0xbbd113d2, 0x426f03a7, 0xcd1cd97e, 0x321886e0, 0x894b01a2, 0x6ef2e9aa, 0x4aa8b1e0, 0xc2c2ff9a, 0x37a2d9a6, 0x3e4b059b, 0x4635f62b, 0x0ea4b349, 0x4bf910ea, 0x24d44dae, 0xc60dba2b, 0x017df1de, 0x7d7e20c1, 0x6db4c345, 0xfa704510, 0xcf23dded, 0x0c6f7db9, 0x863aea75, 0x2266ac92, 0x6434e1ec, 0xbd1f2c9f, 0x3a1f0877, 0x45c04d11, 0xc8c0a4de, 0x435beef5, 0x02c271e8, 0x86b845ba, 0x641c1fee, 0x33c0762f, 0x79d746f3, 0x2071528a, 0xd96a62b5, 0x6457d349, 0xbcf4079c, 0xb97d7c3f, 0x7bd9b37c, 0x8d22cb6d, 0xd108173d, 0x0d681c0c, 0xcf551b4f, 0x2c39510d, 0x9ebc64c9, 0x1ef42072, 0x12508fce, 0xb08f1e43, 0xe189dbf9, 0x210af02d, 0xce1ada8a, 0x5843de7d, 0xc8f2eb59, 0x97ebfc1f, 0xde0b9b2a, 0xc7ca01e9, 0x0a605020, 0x9d3265f2, 0x0ad04e2b, 0x3d203fe2, 0xb1cb3883, 0x7e3ee25a, 0x02483609, 0xac38260f, 0x6d028d74, 0xdffeb968, 0x1c1d8074, 0x53be10b2, 0x184ed1cb, 0x4e0bc9e4, 0x7d600a82, 0xd5efa6f7, 0xbe35cb9d, 0xe7aab6ff, 0xd24efe88, 0x4f95ee51, 0x531a720f, 0x9281a4d1, 0x349fe52e, 0x0372e4fb, 0xc0f4e1fe, 0xd22ca046, 0x7ad0d314, 0xb1917a84, 0x65ddfdc8, 0xf9e7d7f2, 0x731c3ae6, 0x3ccf4730, 0x97503482, 0x9a75a545, 0xf3713091, 0x7074223f, 0x5ac2d900, 0xbb5bbda3, 0xf3ac9fe5, 0xc5f432f7, 0xc06ba069, 0x59d175ee, 0xf6624a20, 0x5ad27321, 0x44ec92f7, 0x5cdcda3c, 0xe4bfbaae, 0x61a61fdb, 0xeb78d322, 0x8a225c70, 0x5af8aa8e, 0x98dfebfa, 0x5e09f08a, 0x955bc9fa, 0xaf28b29a, 0x9795588a, 0x3a880cb4, 0xacfb27cb, 0xfadfac71, 0x26f58cd2, 0x8a9a8481, 0x5c73cf8f, 0x0a764745, 0x8e7525c6, 0x5a48f4bd, 0x9a038d74, 0xbe052e79, 0x50b8ab7b, 0x847b3fd6, 0xfd7af126, 0x910b5700, 
0x89f74a62, 0xeacb449c, 0xba8ab0ec, 0xfb0d55df, 0xc9000ded, 0xa0e4fdc0, 0xd7fdbcbc, 0x776ad910, 0xf8ebe464, 0xb78641a8, 0x9e19f911, 0xa39f3c7f, 0x28798682, 0x770c0c67, 0xd6618589, 0x3824cfb9, 0x284362cc, 0x6f801c7b, 0x44473f26, 0xeb97128c, 0x9f5e29f8, 0xae790aec, 0xda7ce4f2, 0x2c37baad, 0xe0733abb, 0x2f79c97c, 0x56f014f5, 0x436f9427, 0x3fa2c41e, 0x03f4016e, 0xf29583a0, 0xdaa06497, 0xaef999c4, 0xde6098ea, 0x49d744e4, 0x9ac363c4, 0x71b11295, 0x1056e853, 0xa50f1c79, 0xf58aee75, 0xd2d98a60, 0xcc086a21, 0x633306d2, 0x43516bca, 0xf24c6958, 0x75bf4c20, 0xd1aa09ce, 0xd98701cb, 0x6497d417, 0xf09334da, 0x74005f98, 0x72241183, 0x3b7be9f5, 0xf4e1cf5d, 0xb44fe087, 0x9b347ba7, 0xb662e5db, 0xaf570647, 0x1b3d5eac, 0xf2d33276, 0xf694fa88, 0xfbf7ee8c, 0x722c4ccd, 0xb164605e, 0xfa22d883, 0x716dbb44, 0x00289aec, 0xfeb07061, 0x82f6934c, 0x7b9f20ae, 0x3d2832b2, 0x2127f401, 0x7e614a69, 0xb48b17be, 0x1e9061e2, 0xcdacbff9, 0x29519f11, 0xeb31bcdb, 0xc6a9a031, 0xa1fc0693, 0xdaaacb3d, 0x00177418, 0xd3b045d4, 0x575792d3, 0x7cc871a9, 0x4107f556, 0xd5dd7111, 0xecb8a31b, 0x88f6384e, 0x3e9559d2, 0xb87eab52, 0xddc8d45c, 0xcee4cadb, 0xb9672b94, 0x786129c5, 0x8a238446, 0x5cc9ce36, 0x897b89a3, 0x248f233f, 0x93a01697, 0xa9e32583, 0x46318359, 0xb60bd733, 0xcb344995, 0x93fe4889, 0x60477ed8, 0xe0baae66, 0x6e5f97b1, 0x36c221f3, 0xdecfa578, 0x45a9e7a5, 0xee38c8e3, 0x94209438, 0x0850dc00, 0x21d4792c, 0x0bff88c7, 0xfa703b77, 0x30ce06ab, 0x77bd6475, 0x07909087, 0xc8d5076a, 0x613d19bb, 0xc50da2d5, 0x77314616, 0x10f1ca59, 0xf5ed6e4e, 0x98132237, 0xa67d0346, 0xd17c5af7, 0xe49b092a, 0x2c6ac00a, 0xd6a18085, 0xa608ec39, 0x6f93259e, 0xfcd1c805, 0x119aa599, 0xee224e6a, 0xcd42adfc, 0x5c62cac2, 0x62739935, 0xbb2899f7, 0x3f708519, 0x6cbf5af7, 0x23dab7fb, 0x4c335483, 0x30835257, 0xcb83d782, 0x60495aba, 0x9631c6ff, 0xcdab4568, 0xa9ddb16e, 0x41f50aca, 0x12347371, 0x8d2d523a, 0xf5106c41, 0xb257c349, 0x8da2838f, 0xf1dd89f0, 0xac2752a4, 0x55ea2767, 0x825e66f9, 0x70249c8e, 0x5c98333a, 0xb3bf59c7, 0x4600b593, 0xdb32d803, 0xa009b1a3, 0x573c455f, 0x93c9610b, 0x0f076b69, 0x362b989e, 0x31d3ecfc, 0x86f3c663, 0x4b7db83c, 0x1333370e, 0xef093b3c, 0x8fbfc042, 0x1299de1d, 0xb59f15e9, 0x70a306d4, 0xcdd49b7f, 0xe0e78ac4, 0x3c327e27, 0x88f19b26, 0x0e979bc4, 0xedffe30f, 0x4f5001a8, 0xc4f15293, 0xd622c7ff, 0x51effe17, 0x50bba6a6, 0xce0d506b, 0xb7652789, 0xc41720af, 0x53e907a8, 0x616fa770, 0x5d033835, 0x1039b042, 0x61e5b922, 0xfe81924a, 0xf355025c, 0x6dc5a3f1, 0x3c8f6e63, 0x5b5fe252, 0xa8b6f433, 0xac6fbfa2, 0xf6b0e5b9, 0x20f5b0b4, 0x70f68390, 0x61ad174c, 0x2d359dd8, 0xa4ad7281, 0x0e00eee7, 0x79abb5de, 0x3405bcf7, 0x581d66b4, 0x0279f078, 0xd38722d7, 0x247fe970, 0x3c031225, 0xda242c14, 0x2a2b7ae6, 0x218cef99, 0x4005f76a, 0x1ef3e30a, 0x03b957a3, 0x5f4bc0bb, 0x682bc52e, 0xfd640d9d, 0xbddf1fc4, 0xd8e6d767, 0x69b15d06, 0x5f62cd28, 0x7119ada8, 0x1c0b776a, 0x59bd0273, 0xf6a6a315, 0x62b75968, 0x3f216017, 0x10cb81c2, 0x055266ff, 0x7e4bab81, 0x4959ee06, 0x60876e87, 0x5d6ed71a, 0x8cf8c0fd, 0x7df4d611, 0x3cc63f3f, 0x2fb7033e, 0x26154a95, 0x34057009, 0x822e5c53, 0x8867b292, 0x2d784f05, 0x3e9e7ea3, 0x760d9500, 0x780a7bc4, 0xd36732b2, 0xf36d5b98, 0x20c71e2b, 0x2bc8e9a7, 0x096be252, 0xb9b9236b, 0x8071dfd4, 0xacfc1e15, 0xcbbde7c9, 0x5f33b8f0, 0xbb63b209, 0x551eb7d6, 0x113e505f, 0xb076c142, 0x43044856, 0xaf552da0, 0xd921227b, 0x10817109, 0xdf903cff, 0x1419a111, 0x620ee003, 0xb614f2da, 0x321961d0, 0x650e8b24, 0xd5b909be, 0x79c7fbf6, 0x5849e0b0, 0x9342c0ba, 0x0fbf4e5e, 0xcd01430f, 0xd80f501a, 0xcbde9f45, 0xd1f7c565, 0x20fd41c4, 0x5fef8bb7, 0x755e7eca, 0x611c1ad5, 0x3e40beb2, 0x9e864b82, 
0x69973554, 0x1d1d7c51, 0x3541f0a5, 0xa2aef7cd, 0xafa3c0e1, 0x1140c1b7, 0x82f9c5fd, 0x0e53bda8, 0xeb48c46a, 0xe6e5f766, 0x686f4800, 0x84ff9918, 0x3a299c37, 0x7c3a324f, 0x25295743, 0xb9131e22, 0xa9467eff, 0x88bd0285, 0xfe7b56c4, 0x3774cab6, 0x8b709b30, 0xe98eb68f, 0xf99ed1ae, 0x21c8eda6, 0x8ceb5ee2, 0x24474545, 0x8709e6f9, 0x72b0e80f, 0x66d9c7f5, 0xe2ca210b, 0xdfbd7e43, 0xc9c5335c, 0x68b085ce, 0x021f3760, 0x2805cab4, 0xc2817224, 0x33054445, 0x05fc88f5, 0x4ef25d45, 0x1aed0636, 0xcaa0b185, 0xa8eb115b, 0x595bde5a, 0x7772b19b, 0x4dd2ca0d, 0xfc5c2185, 0x78c831dc, 0x4ffef944, 0x78430100, 0x03850200, 0x58c49038, 0x5aeee65f, 0x818f6b1d, 0x21ac1b4b, 0xecf90ec1, 0xfc678952, 0xe8340c7c, 0x51c1d8ca, 0xbfc33274, 0x1a83815c, 0x49512c1b, 0x4d262ce3, 0xab2674eb, 0xa84710f8, 0x96c698e5, 0xd045f84b, 0xc2ff8580, 0xf7a42aa9, 0x6eee74eb, 0xfde04cd3, 0xaa18c506, 0xcb3c95dc, 0x4761f04c, 0x01a75b02, 0x395cb0ce, 0x6269b03c, 0x7a4976a5, 0x982a10f3, 0x42cf92cd, 0x8c110454, 0x055408da, 0x0ada639f, 0xf3d74ec6, 0x3525b6b4, 0x21d1437d, 0x5ec84bc1, 0x5a8256e9, 0xe03bc870, 0xd4ec26ad, 0xff77bf3b, 0xf1af0c32, 0x8d8ed6df, 0x02fa5021, 0x7d04282b, 0x9d16455e, 0x5656ed1c, 0x86015646, 0x4e911190, 0x97108eb4, 0xb95d284c, 0x9d379b17, 0xe7ea9203, 0x57092245, 0xb821ff82, 0xefcef176, 0x4c7885d5, 0x36a3990d, 0x4949c392, 0x3a37d261, 0xe3542623, 0xb8f04501, 0x49554eaa, 0x5784625e, 0xc5fdb9d5, 0x089b3dc0, 0xf4741645, 0x59c2b88b, 0x044c5ecf, 0xb5b895f4, 0x5e643350, 0x59fd5974, 0x18af0a5e, 0x1690a41e, 0x7afd7df8, 0xce194b1d, 0x7348105f, 0x55bdac13, 0x992f1f30, 0x7576c408, 0xd5986ca9, 0xfb5db4bb, 0xdd077a3e, 0x1555fd40, 0x8f43e64c, 0x0891c0b2, 0xc034b002, 0xa178edf1, 0x73c6aa69, 0x43768cbf, 0x50518a88, 0x5cea403e, 0x66621f52, 0x6454ada9, 0xfc5353d7, 0xb6367c33, 0x36083ea3, 0x7c6370d7, 0x871ce0df, 0x996a9db6, 0xfc85b23e, 0x4832a696, 0xd9e633e3, 0x8d57a607, 0xb065c6f6, 0x6b14e612, 0x640a3223, 0x7d40f089, 0x00e856a5, 0x02b89be7, 0xbb17308d, 0xf495ca2e, 0x36d73234, 0xe6efc3f6, 0x3e34538a, 0x50cae00d, 0xc659ceea, 0x8f9c4fd0, 0xffd2a14b, 0x8f3c0302, 0xad6bf0e2, 0x7da1cdf3, 0x587bb565, 0xf5a45c06, 0x7b58dfac, 0x8d5689a9, 0xdd8193b7, 0x0fea45f3, 0x849600fa, 0x71d1f8ee, 0xd9cd7337, 0x10ed0d73, 0xbfb107e3, 0xae79885f, 0xbc424999, 0xefa0a006, 0x66fcd210, 0xb8221c2c, 0x4e4fcb90, 0xbc19bd01, 0x13a6165e, 0xb2456d5c, 0x9de9927f, 0x5cad9384, 0xc5b8aca8, 0x709eaed3, 0x025d5327, 0x3334f98d, 0xc8775813, 0xa7e3a7b5, 0x4900d83c, 0xdb3e88aa, 0x798cf72d, 0xc5201d9c, 0xd301c8a5, 0x24aba004, 0x7fb00d06, 0x96b07431, 0x07e817d2, 0x5dc5e25c, 0xe3565527, 0x4e1527e2, 0x9b19ac07, 0xb97a792b, 0x810e2f87, 0xb7e67428, 0xbdc1ad62, 0x4d68b464, 0x86743e97, 0x5ac3a913, 0x74a330bc, 0xedbd0163, 0x17c1ca06, 0x3762d54b, 0x67cac477, 0x57fb1db4, 0xca5c3afa, 0xe9cdec2a, 0x3f517285, 0x6c123489, 0x8c9c347f, 0xdc352535, 0xfd72d8b0, 0x615adae6, 0xd2149734, 0xb4d087d2, 0x50777523, 0x152794d7, 0xa1de388e, 0xacbfb4a7, 0xcc792a3f, 0x0c9beff1, 0x4518932e, 0xbb10562a, 0x0231bbab, 0x34d6b5e4, 0xe56b9d51, 0x1179cf3a, 0x7c9cca04, 0x7e6804dd, 0x43749e37, 0x474abbed, 0xeca9da66, 0xdbc9f38a, 0xbd90f4f2, 0xca3e3208, 0xf6804e7b, 0x6c2f368b, 0x71865b53, 0x477e86cf, 0xf9556567, 0x99f1268d, 0x5b0d3742, 0x5c7759e9, 0x329a93f4, 0x6edb40ef, 0xd8238cf4, 0x14c6653d, 0xda60ef87, 0x845f263d, 0x5ecb0b8a, 0xc0318caa, 0xb6e9c384, 0xb375f88e, 0x8bbc46cc, 0x9cf69f27, 0x5ad28fa8, 0x458c2af7, 0xc848d9c1, 0xa8fc2598, 0xef9d6e05, 0x72e2b656, 0x548e9bd8, 0x57c6446f, 0x1ed6fa9f, 0xee2aabba, 0xa0ad6aeb, 0x9cf39373, 0xe1ef4e1e, 0x84aaf630, 0x51839901, 0x1fd11926, 0x7e8d187e, 0x3f0c9a19, 0xd15b2dd2, 0x87bcba56, 0x4e4abd25, 
0xfe14d638, 0x632c77f2, 0xdd2fb790, 0xa7172b76, 0x51b3b07e, 0xbc087c39, 0x08e4bfcd, 0x835236c7, 0x2f55bb11, 0x01de9ed4, 0x3dfc98ce, 0x28776e03, 0x98fb6143, 0x3b05e401, 0x0999f936, 0x673cda1e, 0x9075e503, 0x80dbd8a1, 0x1d113be7, 0x368624b0, 0xd06b7118, 0xdc0378f6, 0x0c6aee59, 0x3c77c121, 0xb75108d5, 0xa1d6c1a7, 0xdacd595b, 0x71d9b0e5, 0x4afe1c59, 0x4a2cd1e8, 0x8c3eda88, 0x97ae95c2, 0x6bfacf26, 0x47182448, 0xdbd882ef, 0xae660019, 0xb0986ca0, 0xe8daf1be, 0xb18b34dd, 0x6ba59afa, 0x560c57ea, 0xf6be34ba, 0xcd89f600, 0x6403fcfe, 0x5e429947, 0x7d4886e1, 0x8018e754, 0xc17251e7, 0x36eab7c6, 0x9067ad78, 0xe95f1fe6, 0x44961501, 0x8619621c, 0xdb13e77d, 0xd5d6124d, 0xd36f8ac0, 0x9395356c, 0xf713d1e1, 0x2b3c9d15, 0x4dfb98f9, 0xb553ff7f, 0xd1675da5, 0x382dcfdb, 0x659ed198, 0xa0bfe7d7, 0x0c1fe7f7, 0x6ae7194d, 0x1966c9fe, 0x369f1f79, 0x1137c2c2, 0xf7a06345, 0xfe3f544b, 0xac35a2f4, 0xd7aea537, 0x9b37ff3b, 0xfc395a7b, 0xf3c2710a, 0xf7ec7804, 0x5f5820ca, 0x72b2a99c, 0x6162f1ca, 0x9f4288a2, 0xa888ed1e, 0x02208839, 0xea56c569, 0xace682bc, 0x95096878, 0xf33986ce, 0xbc3ab34e, 0x852fb06b, 0x4d809b7b, 0x3475e9c8, 0xe947baae, 0x18535080, 0x205c85fa, 0x5792f851, 0xe029ec48, 0xd4403f27, 0x587471da, 0x3bd97278, 0x91f1a328, 0x65ea5d8a, 0xc0cfbf0f, 0x135abf90, 0x62843a32, 0xdf6a7aa1, 0x79dc7616, 0xd091c454, 0x355e2d4a, 0xa54e04a6, 0x25719823, 0x41bfa322, 0x94ec342e, 0xbcd06558, 0x52775497, 0xdcd0c726, 0x8c8f3975, 0xbc31513b, 0xb9b3acec, 0x33bbeff5, 0xa3432872, 0xd8dd265e, 0xc1d9f64e, 0xe016c95d, 0x840b9c5e, 0xecdf0d3b, 0x9335eac7, 0xe319c342, 0xaf8a83ca, 0xc2f11b65, 0x8d40c919, 0x3b91cd82, 0x70aad694, 0x341ff4ee, 0xb3fdc758, 0x86e8e96c, 0x239e0308, 0x7daaf786, 0x6d9c3e2e, 0x028237e0, 0x652b0a79, 0x0f203491, 0xccb40e73, 0xb1954681, 0x7ab03780, 0x9cd9ef50, 0x43e720b7, 0xe7de746d, 0xade36a14, 0xbb4f7503, 0x21ef55aa, 0x45a0e8e3, 0x1156ea33, 0x1091d26b, 0xc8b9a8d0, 0x722df639, 0x92977f76, 0x8cb11fe6, 0x0aaeedc7, 0xb1096093, 0xe45ea74f, 0xc2b54ff5, 0x190e549f, 0x222192cb, 0x695f9d7e, 0x926d466a, 0x0dc294aa, 0x7e16a1b5, 0xa4bd2267, 0x19ee878a, 0xc5b8c83a, 0x001b6546, 0x6fbf7edf, 0xb3708024, 0x8e98402d, 0x86af015f, 0x38dfd5b8, 0xc50808ef, 0x29a71185, 0x85f233d5, 0x9dea5939, 0xce3cf02c, 0x45d68b76, 0x745a85b0, 0x6fe38075, 0x52e01b99, 0xc4693697, 0x2cd0bf67, 0x3edecb8f, 0xeb76f624, 0xe3eb94f4, 0xf587d9e5, 0x813543ea, 0x93dfaced, 0x018c23ad, 0xb6781fd7, 0xbd564fc3, 0x962da3f0, 0x389ab6b1, 0x5866fd23, 0x1f8b3979, 0xc10386bc, 0x4ef64f11, 0xb4572417, 0xa2f65667, 0xd68b6523, 0xa5512c00, 0x2d5f663e, 0x3bf700ff, 0x09ef3396, 0x451bcb0d, 0xfee39ccd, 0xf606c443, 0xb46ffa45, 0x85bcfa16, 0x7fc6efa9, 0x701ec2fe, 0xde98a301, 0xd9980668, 0xc5d004ae, 0xd03dbbb3, 0xb1d795c2, 0x2ab6203e, 0xfa237b58, 0xbf425321, 0x000e019a, 0x2547dbbf, 0xfb97b8ed, 0x08b09edd, 0x42cd7b68, 0x32198a7f, 0x87a2a72d, 0xe0a6a1aa, 0x2866775c, 0xc66e7ac5, 0x9edc41d3, 0x983a6ec5, 0xd3e9793a, 0xa53ad299, 0x38d774ce, 0x75ce7fbd, 0xb353f86e, 0xc37bc25e, 0x5f2b5532, 0x1975cde4, 0xc8294de5, 0x47fbc7c1, 0xb76b2789, 0xea88c920, 0x6c2145ba, 0x4c2211f2, 0x16a0e6de, 0xa2915469, 0xea2b14e2, 0x35202f43, 0xaa9c69de, 0xbd00bd0e, 0xbea9e3f0, 0xbfb0bb47, 0x74347ce4, 0x1bc15e8f, 0xd9f70c10, 0x968f1e31, 0xc55fa605, 0x47af7015, 0xe772ade0, 0x28a5c782, 0x0955a18f, 0x1054e43d, 0x5d7702b1, 0xd54a0e22, 0x82f0f8be, 0x787b2bce, 0x243ce7b8, 0xaa160942, 0x5e7477e9, 0x45cd0df6, 0x5bdca3d7, 0x6b992304, 0x4a8d3515, 0xfe356a57, 0xb011fc33, 0x0e2624c6, 0x0ba4ae36, 0x029fecca, 0x01ac36cc, 0x661010ac, 0x7a8a378a, 0xf499b342, 0x973256e2, 0x1229865d, 0x03411f03, 0x7d925789, 0x6399fa28, 0x1859ddb8, 0x83becb6d, 
	0x3b8322f5, 0xd0d5f97a, 0xc0fcca51, 0x6be07c35, 0x6072bdea, 0xc09b95e6,
	0xa40826f8, 0x1fb79832, 0xc401e83d, 0xcff57a0c, 0x834ee1c2, 0x59ab9f78,
	0xad6e094a, 0xad2218bb, 0x3bbf864c, 0xbb62b997, 0x23eed3ff, 0xd033de59,
	0x031f0ec4, 0xbcfabf99, 0xac63ae10, 0xe4dc4aec, 0x5b98d623, 0x0dad0246,
	0xb5884cbb, 0xa39db5c7, 0x3c243f66, 0x5e69dbfb, 0x3384971d, 0x9145a99e,
	0x87570b15, 0xd6821205, 0x34fa05cf, 0xff4ac046, 0x8a98f678, 0x72add320,
	0x910a51a5, 0xe78b8b93, 0xb28cd243, 0x6a752e76, 0x16b4e6a9, 0x60b5e403,
	0xc5c51f70, 0xbee86c57, 0x75a122c3, 0x3b7e773d, 0xfd8ab8ec, 0x72839672,
	0x6a713aa4, 0x18fd1c1d, 0x2ae1e7db, 0xa77453d7, 0x01e7e6c4, 0x31b08d49,
	0x636c7119, 0x736028e8, 0x75a31941, 0xcf080b2c, 0x4a92a8fb, 0x84f6b87a,
	0x4f97e0dc, 0x8a7b11d2, 0x1ce7f369, 0x056f3a69, 0x40393f83, 0xffc98a61,
	0x80daf387, 0xc6a757b1, 0xa95790e2, 0x1c76cf02, 0xa1450bba, 0x3a3150e5,
	0x378e9844, 0x7c47420d, 0x617d2066, 0x8cbd025e, 0x252260a0, 0xd7ded568,
	0x8e5400d7
};

unsigned int AdEncryptPassword(const char* username, const char* password)
{
	unsigned int userlength;
	unsigned int passlength;
	unsigned int a = 0;
	int i;

	for (i = 0; username[i] != 0; i++) {
		a += AdRandomNumbers[(i + username[i]) & 0x7ff];
	}
	userlength = i;

	for (i = 0; password[i] != 0; i++) {
		a += AdRandomNumbers[(i + password[i] + userlength) & 0x7ff];
	}
	passlength = i;

	return AdRandomNumbers[(userlength + passlength) & 0x7ff] + a;
}

static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_tiny(sizeof(*saved_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_key = mem_calloc_tiny(sizeof(*crypt_key) *
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}

static struct fmt_tests cq_tests[] = {
	{"$cq$admin$a9db7ca6", ""},
	{"$cq$admin$10200218", "admin"},
	{"$cq$admin$4cfb73f2", "password"},
	{"$cq$clearquest$a279b184", "clearquest"},
	{NULL}
};

static int ishex(char *q)
{
	while (atoi16[ARCH_INDEX(*q)] != 0x7F)
		q++;
	return !*q;
}

static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q, *tmpstr;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		return 0;
	tmpstr = strdup(ciphertext);
	q = p = &tmpstr[TAG_LENGTH];
	p[-1] = 0;
	p = strrchr(p, '$');
	if (!p)
		goto Err;
	p += 1;
	if (strlen(p) != BINARY_SIZE * 2)
		goto Err;
	if (!ishex(p))
		goto Err;
	if ((p - q) >= SALT_SIZE || p <= q)
		goto Err;
	MEM_FREE(tmpstr);
	return 1;
Err:;
	MEM_FREE(tmpstr);
	return 0;
}

static void *get_salt(char *ciphertext)
{
	static char salt[SALT_SIZE + 1];
	char *p, *q;

	memset(salt, 0, SALT_SIZE);
	p = ciphertext + TAG_LENGTH;
	q = strchr(p, '$');
	memcpy(salt, p, q - p);

	return salt;
}

static void* get_binary(char *ciphertext)
{
	char *p;
	unsigned int* out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);

	p = strrchr(ciphertext, '$') + 1;
	*out = (unsigned int)strtoul(p, NULL, 16);

	return out;
}

static int salt_hash(void *salt)
{
	unsigned char *s = salt;
	unsigned int hash = 5381;
	unsigned int len = SALT_SIZE;

	while (len-- && *s)
		hash = ((hash << 5) + hash) ^ *s++;

	return hash & (SALT_HASH_SIZE - 1);
}

static void set_salt(void *salt)
{
	memcpy(saved_salt, salt, SALT_SIZE);
}

static void cq_set_key(char *key, int index)
{
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		*crypt_key[index] = AdEncryptPassword(saved_salt, saved_key[index]);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; ++i) {
		if ((*(unsigned int*) binary) == *(unsigned int*) crypt_key[i])
			return 1;
	}

	return 0;
}

static int cmp_one(void *binary, int index)
{
	if ((*(unsigned int*) binary) == *(unsigned int*) crypt_key[index])
		return 1;
	return 0;
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; }

struct fmt_main fmt_cq = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		cq_tests
	}, {
		init,
		fmt_default_done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
#if FMT_MAIN_VERSION > 11
		{ NULL },
#endif
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		set_salt,
		cq_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif
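/*
 * The CQWeb "hash" above is an additive table lookup: every character
 * contributes one entry of AdRandomNumbers[], indexed by its value plus its
 * position (password characters are additionally offset by the username
 * length), and the final sum is offset by one length-dependent entry. The
 * salt is the username itself. A hypothetical standalone driver (not part
 * of the plugin) compiled against the table and AdEncryptPassword() above
 * should reproduce the self-test vectors in cq_tests[]:
 */
#include <stdio.h>

int main(void)
{
	/* expected: a9db7ca6, per the "$cq$admin$a9db7ca6" test vector */
	printf("%08x\n", AdEncryptPassword("admin", ""));

	/* expected: 4cfb73f2, per "$cq$admin$4cfb73f2" */
	printf("%08x\n", AdEncryptPassword("admin", "password"));

	return 0;
}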
cones.c
#include "cones.h" #include "linalg.h" #include "scs.h" #include "scs_blas.h" /* contains BLAS(X) macros and type info */ #include "util.h" #define CONE_TOL (1e-9) #define CONE_THRESH (1e-8) #define EXP_CONE_MAX_ITERS (100) #define BOX_CONE_MAX_ITERS (25) #define POW_CONE_MAX_ITERS (20) /* Box cone limits (+ or -) taken to be INF */ #define MAX_BOX_VAL (1e15) #ifdef USE_LAPACK #ifdef __cplusplus extern "C" { #endif void BLAS(syev)(const char *jobz, const char *uplo, blas_int *n, scs_float *a, blas_int *lda, scs_float *w, scs_float *work, blas_int *lwork, blas_int *info); blas_int BLAS(syrk)(const char *uplo, const char *trans, const blas_int *n, const blas_int *k, const scs_float *alpha, const scs_float *a, const blas_int *lda, const scs_float *beta, scs_float *c, const blas_int *ldc); void BLAS(scal)(const blas_int *n, const scs_float *sa, scs_float *sx, const blas_int *incx); #ifdef __cplusplus } #endif #endif void SCS(free_cone)(ScsCone *k) { if (k) { if (k->bu) scs_free(k->bu); if (k->bl) scs_free(k->bl); if (k->q) scs_free(k->q); if (k->s) scs_free(k->s); if (k->p) scs_free(k->p); scs_free(k); } } void SCS(deep_copy_cone)(ScsCone *dest, const ScsCone *src) { memcpy(dest, src, sizeof(ScsCone)); /* copy bu, bl */ if (src->bsize > 1) { dest->bu = (scs_float *)scs_calloc(src->bsize - 1, sizeof(scs_float)); memcpy(dest->bu, src->bu, (src->bsize - 1) * sizeof(scs_float)); dest->bl = (scs_float *)scs_calloc(src->bsize - 1, sizeof(scs_float)); memcpy(dest->bl, src->bl, (src->bsize - 1) * sizeof(scs_float)); } else { dest->bu = SCS_NULL; dest->bl = SCS_NULL; } /* copy SOC */ if (src->qsize > 0) { dest->q = (scs_int *)scs_calloc(src->qsize, sizeof(scs_int)); memcpy(dest->q, src->q, src->qsize * sizeof(scs_int)); } else { dest->q = SCS_NULL; } /* copy PSD cone */ if (src->ssize > 0) { dest->s = (scs_int *)scs_calloc(src->ssize, sizeof(scs_int)); memcpy(dest->s, src->s, src->ssize * sizeof(scs_int)); } else { dest->s = SCS_NULL; } /* copy power cone */ if (src->psize > 0) { dest->p = (scs_float *)scs_calloc(src->psize, sizeof(scs_float)); memcpy(dest->p, src->p, src->psize * sizeof(scs_float)); } else { dest->p = SCS_NULL; } } /* set the vector of rho y terms, based on scale and cones */ void SCS(set_r_y)(const ScsConeWork *c, scs_float scale, scs_float *r_y) { scs_int i; /* z cone */ for (i = 0; i < c->k->z; ++i) { /* set rho_y small for z, similar to rho_x term, since z corresponds to * dual free cone, this effectively decreases penalty on those entries * and lets them be determined almost entirely by the linear system solve */ r_y[i] = 1.0 / (1000. * scale); } /* others */ for (i = c->k->z; i < c->m; ++i) { r_y[i] = 1.0 / scale; } } /* the function f aggregates the entries within each cone */ void SCS(enforce_cone_boundaries)(const ScsConeWork *c, scs_float *vec, scs_float (*f)(const scs_float *, scs_int)) { scs_int i, j, delta; scs_int count = c->cone_boundaries[0]; scs_float wrk; for (i = 1; i < c->cone_boundaries_len; ++i) { delta = c->cone_boundaries[i]; wrk = f(&(vec[count]), delta); for (j = count; j < count + delta; ++j) { vec[j] = wrk; } count += delta; } } static inline scs_int get_sd_cone_size(scs_int s) { return (s * (s + 1)) / 2; } /* * boundaries will contain array of indices of rows of A corresponding to * cone boundaries, boundaries[0] is starting index for cones of size strictly * larger than 1, boundaries malloc-ed here so should be freed. 
 */
void set_cone_boundaries(const ScsCone *k, ScsConeWork *c) {
  scs_int i, s_cone_sz, count = 0;
  scs_int cone_boundaries_len =
      1 + k->qsize + k->ssize + k->ed + k->ep + k->psize;
  scs_int *b = (scs_int *)scs_calloc(cone_boundaries_len, sizeof(scs_int));
  /* cones that can be scaled independently */
  b[count] = k->z + k->l + k->bsize;
  count += 1; /* started at 0 now move to first entry */
  for (i = 0; i < k->qsize; ++i) {
    b[count + i] = k->q[i];
  }
  count += k->qsize;
  for (i = 0; i < k->ssize; ++i) {
    s_cone_sz = get_sd_cone_size(k->s[i]);
    b[count + i] = s_cone_sz;
  }
  count += k->ssize; /* add ssize here not ssize * (ssize + 1) / 2 */
  /* exp cones */
  for (i = 0; i < k->ep + k->ed; ++i) {
    b[count + i] = 3;
  }
  count += k->ep + k->ed;
  /* power cones */
  for (i = 0; i < k->psize; ++i) {
    b[count + i] = 3;
  }
  count += k->psize;
  /* other cones */
  c->cone_boundaries = b;
  c->cone_boundaries_len = cone_boundaries_len;
}

static scs_int get_full_cone_dims(const ScsCone *k) {
  scs_int i, c = k->z + k->l + k->bsize;
  if (k->qsize) {
    for (i = 0; i < k->qsize; ++i) {
      c += k->q[i];
    }
  }
  if (k->ssize) {
    for (i = 0; i < k->ssize; ++i) {
      c += get_sd_cone_size(k->s[i]);
    }
  }
  if (k->ed) {
    c += 3 * k->ed;
  }
  if (k->ep) {
    c += 3 * k->ep;
  }
  if (k->psize) {
    c += 3 * k->psize;
  }
  return c;
}

scs_int SCS(validate_cones)(const ScsData *d, const ScsCone *k) {
  scs_int i;
  if (get_full_cone_dims(k) != d->m) {
    scs_printf("cone dimensions %li not equal to num rows in A = m = %li\n",
               (long)get_full_cone_dims(k), (long)d->m);
    return -1;
  }
  if (k->z && k->z < 0) {
    scs_printf("free cone dimension error\n");
    return -1;
  }
  if (k->l && k->l < 0) {
    scs_printf("lp cone dimension error\n");
    return -1;
  }
  if (k->bsize) {
    if (k->bsize < 0) {
      scs_printf("box cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->bsize - 1; ++i) {
      if (k->bl[i] > k->bu[i]) {
        scs_printf("infeasible: box lower bound larger than upper bound\n");
        return -1;
      }
    }
  }
  if (k->qsize && k->q) {
    if (k->qsize < 0) {
      scs_printf("soc cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->qsize; ++i) {
      if (k->q[i] < 0) {
        scs_printf("soc cone dimension error\n");
        return -1;
      }
    }
  }
  if (k->ssize && k->s) {
    if (k->ssize < 0) {
      scs_printf("sd cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->ssize; ++i) {
      if (k->s[i] < 0) {
        scs_printf("sd cone dimension error\n");
        return -1;
      }
    }
  }
  if (k->ed && k->ed < 0) {
    scs_printf("ed cone dimension error\n");
    return -1;
  }
  if (k->ep && k->ep < 0) {
    scs_printf("ep cone dimension error\n");
    return -1;
  }
  if (k->psize && k->p) {
    if (k->psize < 0) {
      scs_printf("power cone dimension error\n");
      return -1;
    }
    for (i = 0; i < k->psize; ++i) {
      if (k->p[i] < -1 || k->p[i] > 1) {
        scs_printf("power cone error, values must be in [-1,1]\n");
        return -1;
      }
    }
  }
  return 0;
}

void SCS(finish_cone)(ScsConeWork *c) {
#ifdef USE_LAPACK
  if (c->Xs) {
    scs_free(c->Xs);
  }
  if (c->Z) {
    scs_free(c->Z);
  }
  if (c->e) {
    scs_free(c->e);
  }
  if (c->work) {
    scs_free(c->work);
  }
#endif
  if (c->cone_boundaries) {
    scs_free(c->cone_boundaries);
  }
  if (c->s) {
    scs_free(c->s);
  }
  if (c) {
    scs_free(c);
  }
}

char *SCS(get_cone_header)(const ScsCone *k) {
  char *tmp = (char *)scs_malloc(sizeof(char) * 512);
  scs_int i, soc_vars, sd_vars;
  sprintf(tmp, "cones: ");
  if (k->z) {
    sprintf(tmp + strlen(tmp), "\t z: primal zero / dual free vars: %li\n",
            (long)k->z);
  }
  if (k->l) {
    sprintf(tmp + strlen(tmp), "\t l: linear vars: %li\n", (long)k->l);
  }
  if (k->bsize) {
    sprintf(tmp + strlen(tmp), "\t b: box cone vars: %li\n", (long)(k->bsize));
  }
  soc_vars = 0;
  if (k->qsize && k->q) {
    for (i = 0; i < k->qsize; i++) {
soc_vars += k->q[i]; } sprintf(tmp + strlen(tmp), "\t q: soc vars: %li, qsize: %li\n", (long)soc_vars, (long)k->qsize); } sd_vars = 0; if (k->ssize && k->s) { for (i = 0; i < k->ssize; i++) { sd_vars += get_sd_cone_size(k->s[i]); } sprintf(tmp + strlen(tmp), "\t s: psd vars: %li, ssize: %li\n", (long)sd_vars, (long)k->ssize); } if (k->ep || k->ed) { sprintf(tmp + strlen(tmp), "\t e: exp vars: %li, dual exp vars: %li\n", (long)(3 * k->ep), (long)(3 * k->ed)); } if (k->psize && k->p) { sprintf(tmp + strlen(tmp), "\t p: primal + dual power vars: %li\n", (long)(3 * k->psize)); } return tmp; } static scs_float exp_newton_one_d(scs_float rho, scs_float y_hat, scs_float z_hat, scs_float w) { scs_float t_prev, t = MAX(w - z_hat, MAX(-z_hat, 1e-9)); scs_float f = 1., fp = 1.; scs_int i; for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { t_prev = t; f = t * (t + z_hat) / rho / rho - y_hat / rho + log(t / rho) + 1; fp = (2 * t + z_hat) / rho / rho + 1 / t; t = t - f / fp; if (t <= -z_hat) { t = -z_hat; break; } else if (t <= 0) { t = 0; break; } else if (ABS(t - t_prev) < CONE_TOL) { break; } else if (SQRTF(f * f / fp) < CONE_TOL) { break; } } if (i == EXP_CONE_MAX_ITERS) { scs_printf("warning: exp cone newton step hit maximum %i iters\n", (int)i); scs_printf("rho=%1.5e; y_hat=%1.5e; z_hat=%1.5e; w=%1.5e; f=%1.5e, " "fp=%1.5e, t=%1.5e, t_prev= %1.5e\n", rho, y_hat, z_hat, w, f, fp, t, t_prev); } return t + z_hat; } static void exp_solve_for_x_with_rho(const scs_float *v, scs_float *x, scs_float rho, scs_float w) { x[2] = exp_newton_one_d(rho, v[1], v[2], w); x[1] = (x[2] - v[2]) * x[2] / rho; x[0] = v[0] - rho; } static scs_float exp_calc_grad(const scs_float *v, scs_float *x, scs_float rho, scs_float w) { exp_solve_for_x_with_rho(v, x, rho, w); if (x[1] <= 1e-12) { return x[0]; } return x[0] + x[1] * log(x[1] / x[2]); } static void exp_get_rho_ub(const scs_float *v, scs_float *x, scs_float *ub, scs_float *lb) { *lb = 0; *ub = 0.125; while (exp_calc_grad(v, x, *ub, v[1]) > 0) { *lb = *ub; (*ub) *= 2; } } /* project onto the exponential cone, v has dimension *exactly* 3 */ static scs_int proj_exp_cone(scs_float *v) { scs_int i; scs_float ub, lb, rho, g, x[3]; scs_float r = v[0], s = v[1], t = v[2]; /* v in cl(Kexp) */ if ((s * exp(r / s) - t <= CONE_THRESH && s > 0) || (r <= 0 && s == 0 && t >= 0)) { return 0; } /* -v in Kexp^* */ if ((r > 0 && r * exp(s / r) + exp(1) * t <= CONE_THRESH) || (r == 0 && s <= 0 && t <= 0)) { memset(v, 0, 3 * sizeof(scs_float)); return 0; } /* special case with analytical solution */ if (r < 0 && s < 0) { v[1] = 0.0; v[2] = MAX(v[2], 0); return 0; } /* iterative procedure to find projection, bisects on dual variable: */ exp_get_rho_ub(v, x, &ub, &lb); /* get starting upper and lower bounds */ for (i = 0; i < EXP_CONE_MAX_ITERS; ++i) { rho = (ub + lb) / 2; /* halfway between upper and lower bounds */ g = exp_calc_grad(v, x, rho, x[1]); /* calculates gradient wrt dual var */ if (g > 0) { lb = rho; } else { ub = rho; } if (ub - lb < CONE_TOL) { break; } } #if VERBOSITY > 10 scs_printf("exponential cone proj iters %i\n", (int)i); #endif if (i == EXP_CONE_MAX_ITERS) { scs_printf("warning: exp cone outer step hit maximum %i iters\n", (int)i); scs_printf("r=%1.5e; s=%1.5e; t=%1.5e\n", r, s, t); } v[0] = x[0]; v[1] = x[1]; v[2] = x[2]; return 0; } static scs_int set_up_sd_cone_work_space(ScsConeWork *c, const ScsCone *k) { scs_int i; #ifdef USE_LAPACK blas_int n_max = 0; blas_int neg_one = -1; blas_int info = 0; scs_float wkopt = 0.0; #if VERBOSITY > 0 #define _STR_EXPAND(tok) #tok 
#define _STR(tok) _STR_EXPAND(tok) scs_printf("BLAS(func) = '%s'\n", _STR(BLAS(func))); #endif /* eigenvector decomp workspace */ for (i = 0; i < k->ssize; ++i) { if (k->s[i] > n_max) { n_max = (blas_int)k->s[i]; } } c->Xs = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float)); c->Z = (scs_float *)scs_calloc(n_max * n_max, sizeof(scs_float)); c->e = (scs_float *)scs_calloc(n_max, sizeof(scs_float)); /* workspace query */ BLAS(syev) ("Vectors", "Lower", &n_max, c->Xs, &n_max, SCS_NULL, &wkopt, &neg_one, &info); if (info != 0) { scs_printf("FATAL: syev failure, info = %li\n", (long)info); return -1; } c->lwork = (blas_int)(wkopt + 1); /* +1 for int casting safety */ c->work = (scs_float *)scs_calloc(c->lwork, sizeof(scs_float)); if (!c->Xs || !c->Z || !c->e || !c->work) { return -1; } return 0; #else for (i = 0; i < k->ssize; i++) { if (k->s[i] > 1) { scs_printf( "FATAL: Cannot solve SDPs without linked blas+lapack libraries\n"); scs_printf( "Install blas+lapack and re-compile SCS with blas+lapack library " "locations\n"); return -1; } } return 0; #endif } /* size of X is get_sd_cone_size(n) */ static scs_int proj_semi_definite_cone(scs_float *X, const scs_int n, ScsConeWork *c) { /* project onto the positive semi-definite cone */ #ifdef USE_LAPACK scs_int i, first_idx; blas_int nb = (blas_int)n; blas_int ncols_z; blas_int nb_plus_one = (blas_int)(n + 1); blas_int one_int = 1; scs_float zero = 0., one = 1.; scs_float sqrt2 = SQRTF(2.0); scs_float sqrt2_inv = 1.0 / sqrt2; scs_float *Xs = c->Xs; scs_float *Z = c->Z; scs_float *e = c->e; scs_float *work = c->work; blas_int lwork = c->lwork; blas_int info = 0; scs_float sq_eig_pos; #endif if (n == 0) { return 0; } if (n == 1) { X[0] = MAX(X[0], 0.); return 0; } #ifdef USE_LAPACK /* copy lower triangular matrix into full matrix */ for (i = 0; i < n; ++i) { memcpy(&(Xs[i * (n + 1)]), &(X[i * n - ((i - 1) * i) / 2]), (n - i) * sizeof(scs_float)); } /* rescale so projection works, and matrix norm preserved see http://www.seas.ucla.edu/~vandenbe/publications/mlbook.pdf pg 3 */ /* scale diags by sqrt(2) */ BLAS(scal)(&nb, &sqrt2, Xs, &nb_plus_one); /* not n_squared */ /* Solve eigenproblem, reuse workspaces */ BLAS(syev)("Vectors", "Lower", &nb, Xs, &nb, e, work, &lwork, &info); if (info != 0) { scs_printf("WARN: LAPACK syev error, info = %i\n", (int)info); if (info < 0) { return info; } } first_idx = -1; /* e is eigvals in ascending order, find first entry > 0 */ for (i = 0; i < n; ++i) { if (e[i] > 0) { first_idx = i; break; } } if (first_idx == -1) { /* there are no positive eigenvalues, set X to 0 and return */ memset(X, 0, sizeof(scs_float) * get_sd_cone_size(n)); return 0; } /* Z is matrix of eigenvectors with positive eigenvalues */ memcpy(Z, &Xs[first_idx * n], sizeof(scs_float) * n * (n - first_idx)); /* scale Z by sqrt(eig) */ for (i = first_idx; i < n; ++i) { sq_eig_pos = SQRTF(e[i]); BLAS(scal)(&nb, &sq_eig_pos, &Z[(i - first_idx) * n], &one_int); } /* Xs = Z Z' = V E V' */ ncols_z = (blas_int)(n - first_idx); BLAS(syrk)("Lower", "NoTrans", &nb, &ncols_z, &one, Z, &nb, &zero, Xs, &nb); /* undo rescaling: scale diags by 1/sqrt(2) */ BLAS(scal)(&nb, &sqrt2_inv, Xs, &nb_plus_one); /* not n_squared */ /* extract just lower triangular matrix */ for (i = 0; i < n; ++i) { memcpy(&(X[i * n - ((i - 1) * i) / 2]), &(Xs[i * (n + 1)]), (n - i) * sizeof(scs_float)); } return 0; #else scs_printf("FAILURE: solving SDP but no blas/lapack libraries were found!\n"); scs_printf("SCS will return nonsense!\n"); SCS(scale_array)(X, NAN, n); return -1; 
#endif } static scs_float pow_calc_x(scs_float r, scs_float xh, scs_float rh, scs_float a) { scs_float x = 0.5 * (xh + SQRTF(xh * xh + 4 * a * (rh - r) * r)); return MAX(x, 1e-12); } static scs_float pow_calcdxdr(scs_float x, scs_float xh, scs_float rh, scs_float r, scs_float a) { return a * (rh - 2 * r) / (2 * x - xh); } static scs_float pow_calc_f(scs_float x, scs_float y, scs_float r, scs_float a) { return POWF(x, a) * POWF(y, (1 - a)) - r; } static scs_float pow_calc_fp(scs_float x, scs_float y, scs_float dxdr, scs_float dydr, scs_float a) { return POWF(x, a) * POWF(y, (1 - a)) * (a * dxdr / x + (1 - a) * dydr / y) - 1; } /* * Routine to scale the limits of the box cone by the scaling diagonal mat D > 0 * * want (t, s) \in K <==> (t', s') \in K' * * (t', s') = (d0 * t, D s) (overloading D to mean D[1:]) * (up to scalar scaling factor which we can ignore due to conic prooperty) * * K = { (t, s) | t * l <= s <= t * u, t >= 0 } => * { (t, s) | d0 * t * D l / d0 <= D s <= d0 * t D u / d0, t >= 0 } => * { (t', s') | t' * l' <= s' <= t' u', t >= 0 } = K' * where l' = D l / d0, u' = D u / d0. */ static void normalize_box_cone(ScsCone *k, scs_float *D, scs_int bsize) { scs_int j; for (j = 0; j < bsize - 1; j++) { if (k->bu[j] >= MAX_BOX_VAL) { k->bu[j] = INFINITY; } else { k->bu[j] = D ? D[j + 1] * k->bu[j] / D[0] : k->bu[j]; } if (k->bl[j] <= -MAX_BOX_VAL) { k->bl[j] = -INFINITY; } else { k->bl[j] = D ? D[j + 1] * k->bl[j] / D[0] : k->bl[j]; } } } /* Project onto { (t, s) | t * l <= s <= t * u, t >= 0 }, Newton's method on t tx = [t; s], total length = bsize, under Euclidean metric 1/r_box. */ static scs_float proj_box_cone(scs_float *tx, const scs_float *bl, const scs_float *bu, scs_int bsize, scs_float t_warm_start, scs_float *r_box) { scs_float *x, gt, ht, t_prev, t = t_warm_start; scs_float rho_t = 1, *rho = SCS_NULL, r; scs_int iter, j; if (bsize == 1) { /* special case */ tx[0] = MAX(tx[0], 0.0); return tx[0]; } x = &(tx[1]); if (r_box) { rho_t = 1.0 / r_box[0]; rho = &(r_box[1]); } /* should only require about 5 or so iterations, 1 or 2 if warm-started */ for (iter = 0; iter < BOX_CONE_MAX_ITERS; iter++) { t_prev = t; gt = rho_t * (t - tx[0]); /* gradient */ ht = rho_t; /* hessian */ for (j = 0; j < bsize - 1; j++) { r = rho ? 1.0 / rho[j] : 1.; if (x[j] > t * bu[j]) { gt += r * (t * bu[j] - x[j]) * bu[j]; /* gradient */ ht += r * bu[j] * bu[j]; /* hessian */ } else if (x[j] < t * bl[j]) { gt += r * (t * bl[j] - x[j]) * bl[j]; /* gradient */ ht += r * bl[j] * bl[j]; /* hessian */ } } t = MAX(t - gt / MAX(ht, 1e-8), 0.); /* newton step */ #if VERBOSITY > 3 scs_printf("iter %i, t_new %1.3e, t_prev %1.3e, gt %1.3e, ht %1.3e\n", iter, t, t_prev, gt, ht); scs_printf("ABS(gt / (ht + 1e-6)) %.4e, ABS(t - t_prev) %.4e\n", ABS(gt / (ht + 1e-6)), ABS(t - t_prev)); #endif /* TODO: sometimes this check can fail (ie, declare convergence before it * should) if ht is very large, which can happen with some pathological * problems. */ if (ABS(gt / MAX(ht, 1e-6)) < 1e-12 * MAX(t, 1.) 
      || ABS(t - t_prev) < 1e-11 * MAX(t, 1.)) {
      break;
    }
  }
  if (iter == BOX_CONE_MAX_ITERS) {
    scs_printf("warning: box cone proj hit maximum %i iters\n", (int)iter);
  }
  for (j = 0; j < bsize - 1; j++) {
    if (x[j] > t * bu[j]) {
      x[j] = t * bu[j];
    } else if (x[j] < t * bl[j]) {
      x[j] = t * bl[j];
    }
    /* x[j] unchanged otherwise */
  }
  tx[0] = t;
#if VERBOSITY > 3
  scs_printf("box cone iters %i\n", (int)iter + 1);
#endif
  return t;
}

/* project onto SOC of size q */
static void proj_soc(scs_float *x, scs_int q) {
  if (q == 0) {
    return;
  }
  if (q == 1) {
    x[0] = MAX(x[0], 0.);
    return;
  }
  scs_float v1 = x[0];
  scs_float s = SCS(norm_2)(&(x[1]), q - 1);
  scs_float alpha = (s + v1) / 2.0;

  if (s <= v1) {
    return;
  } else if (s <= -v1) {
    memset(&(x[0]), 0, q * sizeof(scs_float));
  } else {
    x[0] = alpha;
    SCS(scale_array)(&(x[1]), alpha / s, q - 1);
  }
}

static void proj_power_cone(scs_float *v, scs_float a) {
  scs_float xh = v[0], yh = v[1], rh = ABS(v[2]);
  scs_float x = 0.0, y = 0.0, r;
  scs_int i;

  /* v in K_a */
  if (xh >= 0 && yh >= 0 &&
      CONE_THRESH + POWF(xh, a) * POWF(yh, (1 - a)) >= rh) {
    return;
  }

  /* -v in K_a^* */
  if (xh <= 0 && yh <= 0 &&
      CONE_THRESH + POWF(-xh, a) * POWF(-yh, 1 - a) >=
          rh * POWF(a, a) * POWF(1 - a, 1 - a)) {
    v[0] = v[1] = v[2] = 0;
    return;
  }

  r = rh / 2;
  for (i = 0; i < POW_CONE_MAX_ITERS; ++i) {
    scs_float f, fp, dxdr, dydr;
    x = pow_calc_x(r, xh, rh, a);
    y = pow_calc_x(r, yh, rh, 1 - a);
    f = pow_calc_f(x, y, r, a);
    if (ABS(f) < CONE_TOL) {
      break;
    }
    dxdr = pow_calcdxdr(x, xh, rh, r, a);
    dydr = pow_calcdxdr(y, yh, rh, r, (1 - a));
    fp = pow_calc_fp(x, y, dxdr, dydr, a);
    r = MAX(r - f / fp, 0);
    r = MIN(r, rh);
  }
  v[0] = x;
  v[1] = y;
  v[2] = (v[2] < 0) ? -(r) : (r);
}

/* project onto the primal K cone in the paper */
/* the r_y vector determines the INVERSE metric, i.e., project under the
 * diag(r_y)^-1 norm.
 */
static scs_int proj_cone(scs_float *x, const ScsCone *k, ScsConeWork *c,
                         scs_int normalize, scs_float *r_y) {
  scs_int i, status;
  scs_int count = 0;
  scs_float *r_box = SCS_NULL;

  if (k->z) { /* doesn't use r_y */
    /* project onto primal zero / dual free cone */
    memset(x, 0, k->z * sizeof(scs_float));
    count += k->z;
  }

  if (k->l) { /* doesn't use r_y */
    /* project onto positive orthant */
    for (i = count; i < count + k->l; ++i) {
      x[i] = MAX(x[i], 0.0);
    }
    count += k->l;
  }

  if (k->bsize) { /* DOES use r_y */
    if (r_y) {
      r_box = &(r_y[count]);
    }
    /* project onto box cone */
    c->box_t_warm_start = proj_box_cone(&(x[count]), k->bl, k->bu, k->bsize,
                                        c->box_t_warm_start, r_box);
    count += k->bsize; /* since b = (t,s), len(s) = bsize - 1 */
  }

  if (k->qsize && k->q) { /* doesn't use r_y */
    /* project onto second-order cones */
    for (i = 0; i < k->qsize; ++i) {
      proj_soc(&(x[count]), k->q[i]);
      count += k->q[i];
    }
  }

  if (k->ssize && k->s) { /* doesn't use r_y */
    /* project onto PSD cones */
    for (i = 0; i < k->ssize; ++i) {
      status = proj_semi_definite_cone(&(x[count]), k->s[i], c);
      if (status < 0) {
        return status;
      }
      count += get_sd_cone_size(k->s[i]);
    }
  }

  if (k->ep) { /* doesn't use r_y */
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
#ifdef _OPENMP
#pragma omp parallel for
#endif
    for (i = 0; i < k->ep; ++i) {
      proj_exp_cone(&(x[count + 3 * i]));
    }
    count += 3 * k->ep;
  }

  /* dual exponential cone */
  if (k->ed) { /* doesn't use r_y */
    /*
     * exponential cone is not self dual, if s \in K
     * then y \in K^* and so if K is the primal cone
     * here we project onto K^*, via Moreau
     * \Pi_C^*(y) = y + \Pi_C(-y)
     */
    scs_int idx;
    scs_float r, s, t;
    SCS(scale_array)(&(x[count]), -1, 3 * k->ed); /* x = -x; */
#ifdef _OPENMP
#pragma omp parallel for private(r, s, t, idx)
#endif
    for (i = 0; i < k->ed; ++i) {
      idx = count + 3 * i;
      r = x[idx];
      s = x[idx + 1];
      t = x[idx + 2];
      proj_exp_cone(&(x[idx]));
      x[idx] -= r;
      x[idx + 1] -= s;
      x[idx + 2] -= t;
    }
    count += 3 * k->ed;
  }

  if (k->psize && k->p) { /* doesn't use r_y */
    scs_float v[3];
    scs_int idx;
    /* don't use openmp for power cone
    ifdef _OPENMP
    pragma omp parallel for private(v, idx)
    endif
    */
    for (i = 0; i < k->psize; ++i) { /* doesn't use r_y */
      idx = count + 3 * i;
      if (k->p[i] >= 0) {
        /* primal power cone */
        proj_power_cone(&(x[idx]), k->p[i]);
      } else {
        /* dual power cone, using Moreau */
        v[0] = -x[idx];
        v[1] = -x[idx + 1];
        v[2] = -x[idx + 2];
        proj_power_cone(v, -k->p[i]);
        x[idx] += v[0];
        x[idx + 1] += v[1];
        x[idx + 2] += v[2];
      }
    }
    count += 3 * k->psize;
  }
  /* project onto OTHER cones */
  return 0;
}

ScsConeWork *SCS(init_cone)(ScsCone *k, scs_int m) {
  ScsConeWork *c = (ScsConeWork *)scs_calloc(1, sizeof(ScsConeWork));
  c->k = k;
  c->m = m;
  c->scaled_cones = 0;
  set_cone_boundaries(k, c);
  c->s = (scs_float *)scs_calloc(m, sizeof(scs_float));
  if (k->ssize && k->s) {
    if (set_up_sd_cone_work_space(c, k) < 0) {
      SCS(finish_cone)(c);
      return SCS_NULL;
    }
  }
  return c;
}

void scale_box_cone(ScsCone *k, ScsConeWork *c, ScsScaling *scal) {
  if (k->bsize && k->bu && k->bl) {
    c->box_t_warm_start = 1.;
    if (scal) {
      /* also does some sanitizing */
      normalize_box_cone(k, &(scal->D[k->z + k->l]), k->bsize);
    }
  }
}

/* Outward facing cone projection routine, performs projection in-place.
   If normalize > 0 then it will use normalized (equilibrated) cones if
   applicable.

   Moreau decomposition for R-norm projections:

   `x + R^{-1} \Pi_{C^*}^{R^{-1}} ( - R x ) = \Pi_C^R ( x )`

   where \Pi^R_C is the projection onto C under the R-norm:

   `||x||_R = \sqrt{x ' R x}`.
*/
scs_int SCS(proj_dual_cone)(scs_float *x, ScsConeWork *c, ScsScaling *scal,
                            scs_float *r_y) {
  scs_int status, i;
  ScsCone *k = c->k;

  if (!c->scaled_cones) {
    scale_box_cone(k, c, scal);
    c->scaled_cones = 1;
  }

  /* copy s = x */
  memcpy(c->s, x, c->m * sizeof(scs_float));

  /* x -> - Rx */
  for (i = 0; i < c->m; ++i) {
    x[i] *= r_y ? -r_y[i] : -1;
  }

  /* project -x onto cone, x -> \Pi_{C^*}^{R^{-1}}(-x) under r_y metric */
  status = proj_cone(x, k, c, scal ? 1 : 0, r_y);

  /* return x + R^{-1} \Pi_{C^*}^{R^{-1}} ( -x ) */
  for (i = 0; i < c->m; ++i) {
    if (r_y) {
      x[i] = x[i] / r_y[i] + c->s[i];
    } else {
      x[i] += c->s[i];
    }
  }
  return status;
}
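/* ---------------------------------------------------------------------------
 * Editor's sketch (not part of SCS): a standalone check of the Moreau
 * identity used above, \Pi_{C^*}(y) = y + \Pi_C(-y), equivalently
 * x = \Pi_C(x) - \Pi_{C^*}(-x). The second-order cone is self-dual, so with
 * p = Pi_soc(x) and n = Pi_soc(-x) we should recover x = p - n componentwise.
 * soc_proj below mirrors proj_soc in plain double arithmetic; it is an
 * illustrative helper, not the SCS entry point. Compile with: cc demo.c -lm
 * ------------------------------------------------------------------------- */
#include <math.h>
#include <stdio.h>
#include <string.h>

static void soc_proj(double *x, int q) {
  double s = 0.0, alpha;
  int i;
  for (i = 1; i < q; ++i) s += x[i] * x[i];
  s = sqrt(s);
  if (s <= x[0]) return;                      /* already inside the cone */
  if (s <= -x[0]) {                           /* inside the polar cone */
    memset(x, 0, (size_t)q * sizeof(double));
    return;
  }
  alpha = (s + x[0]) / 2.0;                   /* project onto the boundary */
  x[0] = alpha;
  for (i = 1; i < q; ++i) x[i] *= alpha / s;
}

int main(void) {
  double x[3] = {-0.3, 1.2, -0.7}, p[3], n[3];
  int i;
  memcpy(p, x, sizeof(x));
  soc_proj(p, 3);                             /* p = Pi_C(x) */
  for (i = 0; i < 3; ++i) n[i] = -x[i];
  soc_proj(n, 3);                             /* n = Pi_C(-x) = Pi_{C*}(-x) */
  for (i = 0; i < 3; ++i)                     /* self-dual: x == p - n */
    printf("x[%d] = % .6f   p - n = % .6f\n", i, x[i], p[i] - n[i]);
  return 0;
}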
util_display.h
/*
Copyright (c) 2016, TU Dresden
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.
    * Neither the name of the TU Dresden nor the names of its contributors
      may be used to endorse or promote products derived from this software
      without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TU DRESDEN BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#pragma once

#include "types.h"
#include "properties.h"
#include "regression_tree.h"
#include "combined_feature.h"
#include "Hypothesis.h"

/** Utility functions for displaying custom image types and for visualization of results. */

/**
 * @brief Convert an object coordinate image to a BGR image.
 *
 * Object coordinates are mapped to the RGB cube. Minimum and maximum values of the
 * object coordinates are taken from the info file (object extent).
 *
 * @param img Object coordinate image to display.
 * @param info Info type of the object the object coordinates belong to.
 * @return cv::Mat 8bit BGR visualization of the object coordinates.
 */
cv::Mat convertForDisplay(const jp::img_coord_t& img, jp::info_t info)
{
    cv::Mat result(img.size(), CV_8UC3);

    for(int x = 0; x < img.cols; x++)
    for(int y = 0; y < img.rows; y++)
    for(int channel = 0; channel < 3; channel++)
    {
        float maxExtent = info.extent(channel) * 1000.f; // extent is given in meters, object coordinates in millimeters
        int coord = (int) (img(y, x)(channel) + maxExtent / 2.f); // shift zero point so all values are positive
        result.at<cv::Vec3b>(y, x)[channel] = (uchar) ((coord / maxExtent) * 255); // rescale to RGB range
    }

    return result;
}

/**
 * @brief Convert a depth image to a grayscale image.
 *
 * The method determines the minimal and maximal depth of the given image and maps this range to grayscale.
 *
 * @param img Depth image to display.
 * @return cv::Mat 8bit grayscale visualization of the depth image.
 */
cv::Mat convertForDisplay(const jp::img_depth_t& img)
{
    // determine min and max depth first
    jp::depth_t minDepth = 10000;
    jp::depth_t maxDepth = 0;

    for(int x = 0; x < img.cols; x++)
    for(int y = 0; y < img.rows; y++)
    {
        maxDepth = std::max(maxDepth, img(y, x));
        if(img(y, x) > 0) // skip pixels without depth
            minDepth = std::min(minDepth, img(y, x));
    }

    cv::Mat result(img.size(), CV_8U);

    // map depth values to gray values using the min max range
    for(int x = 0; x < img.cols; x++)
    for(int y = 0; y < img.rows; y++)
    {
        if(img(y, x) == 0)
            result.at<uchar>(y, x) = 0; // depth holes are mapped to black
        else
            result.at<uchar>(y, x) = 255 - (uchar) (((img(y, x) - minDepth) / (float) (maxDepth - minDepth)) * 255);
    }

    return result;
}

/**
 * @brief Draws the object bounding box using the given pose.
 *
 * @param img Input image.
 * @param trans Object pose.
 * @param bb3D The 3D object bounding box.
 * @param color Color of the bounding box to draw.
 * @return void
 */
void drawBB(jp::img_bgr_t& img, const jp::cv_trans_t& trans, const std::vector<cv::Point3f>& bb3D, const cv::Scalar& color)
{
    std::vector<cv::Point2f> bb2D;

    // project 3D bounding box vertices into the image
    cv::Mat_<float> camMat = GlobalProperties::getInstance()->getCamMat();
    cv::projectPoints(bb3D, trans.first, trans.second, camMat, cv::Mat(), bb2D);

    int lineW = 2; // line width
    int lineType = CV_AA; // anti-aliasing

    // draw a slightly bigger black line underneath the colored line for a nice halo effect
    cv::line(img, bb2D[0], bb2D[1], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[1], bb2D[3], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[3], bb2D[2], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[2], bb2D[0], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[2], bb2D[6], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[0], bb2D[4], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[1], bb2D[5], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[3], bb2D[7], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[4], bb2D[5], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[5], bb2D[7], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[7], bb2D[6], cv::Scalar(0, 0, 0), lineW+2, lineType);
    cv::line(img, bb2D[6], bb2D[4], cv::Scalar(0, 0, 0), lineW+2, lineType);

    // draw colored line
    cv::line(img, bb2D[0], bb2D[1], color, lineW, lineType);
    cv::line(img, bb2D[1], bb2D[3], color, lineW, lineType);
    cv::line(img, bb2D[3], bb2D[2], color, lineW, lineType);
    cv::line(img, bb2D[2], bb2D[0], color, lineW, lineType);
    cv::line(img, bb2D[2], bb2D[6], color, lineW, lineType);
    cv::line(img, bb2D[0], bb2D[4], color, lineW, lineType);
    cv::line(img, bb2D[1], bb2D[5], color, lineW, lineType);
    cv::line(img, bb2D[3], bb2D[7], color, lineW, lineType);
    cv::line(img, bb2D[4], bb2D[5], color, lineW, lineType);
    cv::line(img, bb2D[5], bb2D[7], color, lineW, lineType);
    cv::line(img, bb2D[7], bb2D[6], color, lineW, lineType);
    cv::line(img, bb2D[6], bb2D[4], color, lineW, lineType);
}

/**
 * @brief Draw a list of object bounding boxes using ground truth information.
 *
 * The method skips objects not visible according to the respective info file.
 *
 * @param img Input image.
 * @param infos Ground truth information about the objects. Contains pose and visibility information.
 * @param bb3Ds The 3D object bounding boxes. Same count as infos.
 * @param colors Colors of the bounding boxes to draw. Same count as infos.
 * @return void
 */
void drawBBs(
    jp::img_bgr_t& img,
    const std::vector<jp::info_t>& infos,
    const std::vector<std::vector<cv::Point3f>>& bb3Ds,
    const std::vector<cv::Scalar>& colors)
{
    for(unsigned o = 0; o < infos.size(); o++)
    {
        if(!infos[o].visible) continue;
        drawBB(img, jp::our2cv(infos[o]), bb3Ds[o], colors[o]);
    }
}

/**
 * @brief Visualize the object coordinate prediction of one tree for a certain object.
 *
 * The method extracts for each pixel the dominant object coordinate
 * prediction (mode with most support) of one tree.
 *
 * @param forest Random forest that made the prediction.
 * @param leafImgs Prediction in the form of leaf indices per pixel. One leaf image for each tree in the forest.
 * @param info Info of the object the prediction was made for.
 * @param treeIdx Index of the tree to visualize the prediction of.
 * @param objID Object ID of the object the prediction should be visualized for.
 * @return cv::Mat BGR visualization of the object coordinate prediction.
 */
cv::Mat getModeImg(
    const std::vector<jp::RegressionTree<jp::feature_t>>& forest,
    const std::vector<jp::img_leaf_t>& leafImgs,
    jp::info_t info,
    int treeIdx,
    jp::id_t objID)
{
    if(leafImgs.empty()) return cv::Mat();
    treeIdx = std::max(0, treeIdx); // use default tree if treeIdx < 0

    jp::img_coord_t modeImg(leafImgs[0].rows, leafImgs[0].cols);

    for(int x = 0; x < modeImg.cols; x++)
    for(int y = 0; y < modeImg.rows; y++)
    {
        size_t leaf = leafImgs[treeIdx](y, x);
        const std::vector<jp::mode_t>* modes = forest[treeIdx].getModes(leaf, objID);

        if(modes->empty() || !leaf)
            modeImg(y, x) = jp::coord3_t(0, 0, 0); // no distribution stored in leaf
        else
            modeImg(y, x) = modes->at(0).mean;
    }

    return convertForDisplay(modeImg, info);
}

/**
 * @brief Visualizes the prediction (segmentation and object coordinates) of the forest for all objects.
 *
 * The method will create a combined visualization of the forest prediction for multiple objects.
 * Predictions of different objects are blended according to the object probabilities predicted for
 * each pixel. Segmentations of separate objects are displayed with different colors. The method
 * will use the prediction of one tree only and the dominant mode per pixel (largest support)
 * for the visualization of the object coordinates.
 *
 * @param segImg Output parameter, soft segmentation (or probability prediction), merged for all objects.
 * @param objImg Output parameter, object coordinate prediction, merged for all objects.
 * @param forest Random forest that made the prediction.
 * @param leafImgs Prediction in the form of leaf indices per pixel. One leaf image for each tree in the forest.
 * @param infos Infos about the objects the prediction was made for. One for each object.
 * @param probabilities Probability predictions of the forest. One for each object.
 * @param colors Colors to use for each object. One for each object.
 * @return void
 */
void drawForestEstimation(
    jp::img_bgr_t& segImg,
    jp::img_bgr_t& objImg,
    const std::vector<jp::RegressionTree<jp::feature_t>>& forest,
    const std::vector<jp::img_leaf_t>& leafImgs,
    const std::vector<jp::info_t>& infos,
    const std::vector<jp::img_stat_t>& probabilities,
    const std::vector<cv::Scalar>& colors)
{
    std::vector<jp::img_bgr_t> estObjImgs;

    // get separate object coordinate visualizations for each object
    for(unsigned o = 0; o < infos.size(); o++)
        estObjImgs.push_back(getModeImg(forest, leafImgs, infos[o], 0, o+1));

    // blend predictions of all objects
    #pragma omp parallel for
    for(unsigned x = 0; x < segImg.cols; x++)
    for(unsigned y = 0; y < segImg.rows; y++)
    for(unsigned p = 0; p < probabilities.size(); p++)
    {
        float prob = probabilities[p](y, x); // object probability is used for blending the predictions

        segImg(y, x)[0] += prob * colors[p][0];
        segImg(y, x)[1] += prob * colors[p][1];
        segImg(y, x)[2] += prob * colors[p][2];

        objImg(y, x)[0] += prob * estObjImgs[p](y, x)[0];
        objImg(y, x)[1] += prob * estObjImgs[p](y, x)[1];
        objImg(y, x)[2] += prob * estObjImgs[p](y, x)[2];
    }
}

/**
 * @brief Creates a combined visualization of ground truth object segmentations and object coordinates.
 *
 * @param segImg Output parameter. Combined segmentation masks of all objects. Each object segmentation uses a different color.
 * @param objImg Output parameter. Combined object coordinate ground truth of all objects.
 * @param gtObjImgs Individual object coordinate ground truth images. One per object.
 * @param gtSegImgs Individual object segmentation masks. One per object.
 * @param infos Infos about the objects to do the visualization for. One per object.
 * @param colors Object colors to use for the visualization. One per object.
 * @return void
 */
void drawGroundTruth(
    jp::img_bgr_t& segImg,
    jp::img_bgr_t& objImg,
    const std::vector<jp::img_coord_t>& gtObjImgs,
    const std::vector<jp::img_id_t>& gtSegImgs,
    const std::vector<jp::info_t>& infos,
    const std::vector<cv::Scalar>& colors)
{
    for(unsigned o = 0; o < infos.size(); o++)
    {
        if(!infos[o].visible) continue; // skip invisible objects
        jp::img_bgr_t curObjImg = convertForDisplay(gtObjImgs[o], infos[o]);

        #pragma omp parallel for
        for(unsigned x = 0; x < segImg.cols; x++)
        for(unsigned y = 0; y < segImg.rows; y++)
        {
            if(!gtSegImgs[o](y, x)) continue; // skip all pixels that do not belong to the object

            segImg(y, x)[0] = colors[o][0];
            segImg(y, x)[1] = colors[o][1];
            segImg(y, x)[2] = colors[o][2];

            objImg(y, x)[0] = curObjImg(y, x)[0];
            objImg(y, x)[1] = curObjImg(y, x)[1];
            objImg(y, x)[2] = curObjImg(y, x)[2];
        }
    }
}

/**
 * @brief Creates a list of colors to use for visualization.
 *
 * @return std::vector<cv::Scalar> List of colors.
 */
std::vector<cv::Scalar> getColors()
{
    std::vector<cv::Scalar> colors;

    colors.push_back(cv::Scalar(255, 0, 0));
    colors.push_back(cv::Scalar(0, 255, 0));
    colors.push_back(cv::Scalar(0, 0, 255));
    colors.push_back(cv::Scalar(255, 255, 0));
    colors.push_back(cv::Scalar(255, 0, 255));
    colors.push_back(cv::Scalar(0, 255, 255));
    colors.push_back(cv::Scalar(255, 127, 0));
    colors.push_back(cv::Scalar(255, 0, 127));
    colors.push_back(cv::Scalar(255, 127, 127));
    colors.push_back(cv::Scalar(127, 255, 0));
    colors.push_back(cv::Scalar(0, 255, 127));
    colors.push_back(cv::Scalar(127, 255, 127));
    colors.push_back(cv::Scalar(127, 0, 255));
    colors.push_back(cv::Scalar(0, 127, 255));
    colors.push_back(cv::Scalar(127, 127, 255));

    for(unsigned c = colors.size(); c < 50; c++)
        colors.push_back(cv::Scalar(127, 127, 127)); // fall back to gray for additional objects

    return colors;
}
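/* ---------------------------------------------------------------------------
 * Editor's sketch (plain C, no OpenCV) of the normalization performed by
 * convertForDisplay(const jp::img_depth_t&) above: scan for the min/max of
 * the valid (non-zero) depths, then map [minDepth, maxDepth] inversely onto
 * [255, 0] so that near surfaces appear bright and depth holes stay black.
 * The toy depth buffer below is an assumption for illustration only.
 * ------------------------------------------------------------------------- */
#include <stdio.h>

int main(void) {
  unsigned short depth[6] = {0, 800, 1200, 0, 1000, 1600}; /* 0 marks a hole */
  unsigned short minD = 65535, maxD = 0;
  int i;
  for (i = 0; i < 6; ++i) {
    if (depth[i] > maxD) maxD = depth[i];
    if (depth[i] > 0 && depth[i] < minD) minD = depth[i]; /* skip holes */
  }
  for (i = 0; i < 6; ++i) {
    unsigned char gray = 0; /* holes map to black */
    if (depth[i] > 0)
      gray = (unsigned char)(255 - ((depth[i] - minD) /
                                    (float)(maxD - minD)) * 255);
    printf("depth %4u -> gray %3u\n", (unsigned)depth[i], (unsigned)gray);
  }
  return 0;
}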
parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel for simd'}} #pragma omp parallel for simd foo void test_no_clause() { int i; #pragma omp parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp parallel for simd' must be a for loop}} #pragma omp parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp parallel #pragma omp parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd linear(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} #pragma omp parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd safelen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; 
i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen( for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd simdlen() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expression is not 
an integer constant expression}} #pragma omp parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp parallel // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp parallel for simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 
'collapse' clause}} #pragma omp parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel #pragma omp parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp parallel // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp parallel for simd', but found only 1}} #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp parallel for simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd linear(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd linear(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // 
expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; int x, y; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd linear(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd linear(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd linear(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be linear}} #pragma omp parallel for simd linear(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as private}} // expected-error@+1 {{private variable cannot be linear}} #pragma omp parallel for simd private(x) linear(x) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be private}} #pragma omp parallel for simd linear(x) private(x) for (i = 0; i < 16; ++i) ; // expected-warning@+1 {{zero linear step (x and other variables in clause should probably be const)}} #pragma omp parallel for simd linear(x, y : 0) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as linear}} // expected-error@+1 {{linear variable cannot be lastprivate}} #pragma omp parallel for simd linear(x) lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-note@+2 {{defined as lastprivate}} // expected-error@+1 {{lastprivate variable cannot be linear}} #pragma omp parallel for simd lastprivate(x) linear(x) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(, for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned() for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp parallel for simd aligned(x, y, z) for 
(i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} #pragma omp parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp parallel // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd 
lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 2 {{expected expression}} #pragma omp parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected expression}} #pragma omp parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp parallel // expected-error@+1 {{expected variable name}} #pragma omp parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp parallel #pragma omp parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp parallel #pragma omp parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp parallel // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
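/* ---------------------------------------------------------------------------
 * Editor's sketch, for contrast with the diagnostics exercised above: a
 * well-formed '#pragma omp parallel for simd'. The clause arguments satisfy
 * the rules the test checks: simdlen(4) <= safelen(8) and the aligned list
 * items are pointers. This snippet is illustrative (compile with -fopenmp)
 * and is not part of the -verify test file itself.
 * ------------------------------------------------------------------------- */
void saxpy(const float *x, float *y, float a, int n) {
  int i;
#pragma omp parallel for simd safelen(8) simdlen(4) aligned(x, y : 32)
  for (i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i]; /* callers must pass 32-byte aligned buffers */
}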
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
% */
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  orient_image=(Image *) NULL;
  switch(orientation)
  {
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case TopRightOrientation:
    {
      orient_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      orient_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      orient_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      orient_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      orient_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      orient_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      orient_image=RotateImage(image,270.0,exception);
      break;
    }
  }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image.
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,chop_image,extent.y,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ChopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o 
l i d a t e   C M Y K   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","(\"%.20gx%.20g%+.20g%+.20g\") `%s'", (double) geometry->width,(double) geometry->height, (double) geometry->x,(double) geometry->y,image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha_trait=BlendPixelTrait; crop_image->background_color.alpha=(MagickRealType) TransparentAlpha; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. 
*/
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p I m a g e T o T i l e s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(CastDoubleToLong(floor(x)));
  return(CastDoubleToLong(ceil(x)));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      crop_image=NewImageList();
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;  /* increment now to find height */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;  /* increment now to find height */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find width */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find width */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
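        Tiles that extend past the right or bottom image edge are clipped by
        CropImage(), so border tiles may be smaller than WxH.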
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=(Image *) NULL;
      crop_image=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to excerpt with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
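    Reads go through a virtual cache view, so any part of the requested
    region that falls outside the image is supplied by the image's virtual
    pixel method rather than failing.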
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,excerpt_image,excerpt_image->rows,1) #endif for (y=0; y < (ssize_t) excerpt_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y, geometry->width,1,exception); q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) excerpt_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel); if ((traits == UndefinedPixelTrait) || (excerpt_traits == UndefinedPixelTrait)) continue; SetPixelChannel(excerpt_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(excerpt_image); } if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } excerpt_view=DestroyCacheView(excerpt_view); image_view=DestroyCacheView(image_view); excerpt_image->type=image->type; if (status == MagickFalse) excerpt_image=DestroyImage(excerpt_image); return(excerpt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x t e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExtentImage() extends the image as defined by the geometry, gravity, and % image background color. Set the (x,y) offset of the geometry to move the % original image relative to the extended image. % % The format of the ExtentImage method is: % % Image *ExtentImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ExtentImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { Image *extent_image; MagickBooleanType status; /* Allocate extent image. 
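    The canvas is filled with the background color, then the source image is
    composited at (-x,-y); positive geometry offsets therefore shift the
    original image left and up relative to the new canvas.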
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue, exception); if (extent_image == (Image *) NULL) return((Image *) NULL); status=SetImageBackgroundColor(extent_image,exception); if (status == MagickFalse) { extent_image=DestroyImage(extent_image); return((Image *) NULL); } status=CompositeImage(extent_image,image,image->compose,MagickTrue, -geometry->x,-geometry->y,exception); if (status != MagickFalse) Update8BIMClipPath(extent_image,image->columns,image->rows,geometry); return(extent_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlipImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis. % % The format of the FlipImage method is: % % Image *FlipImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. 
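    Source row y is written to destination row (rows-y-1); pixel order
    within each row is unchanged.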
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) flip_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel); if ((traits == UndefinedPixelTrait) || (flip_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flip_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(flip_image); } if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlipImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
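    The destination pointer starts one pixel past the end of the row and
    walks backward, so source column x lands at column (columns-x-1).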
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(flop_image)*flop_image->columns; for (x=0; x < (ssize_t) flop_image->columns; x++) { ssize_t i; q-=GetPixelChannels(flop_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel); if ((traits == UndefinedPixelTrait) || (flop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(flop_image,channel,p[i],q); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,FlopImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy, const ssize_t dx,const ssize_t dy,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; /* Transfer scanline. 
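      Rows are independent, so scanlines copy in parallel; a failed read,
      write, or sync on any row clears the shared status.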
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(source); i++) { PixelChannel channel = GetPixelChannelChannel(source,i); PixelTrait source_traits=GetPixelChannelTraits(source,channel); PixelTrait destination_traits=GetPixelChannelTraits(destination, channel); if ((source_traits == UndefinedPixelTrait) || (destination_traits == UndefinedPixelTrait)) continue; SetPixelChannel(destination,channel,p[i],q); } p+=GetPixelChannels(source); q+=GetPixelChannels(destination); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); roll_image=CloneImage(image,0,0,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. */ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. 
A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShaveImage(const Image *image, const RectangleInfo *shave_info,ExceptionInfo *exception) { Image *shave_image; RectangleInfo geometry; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (((2*shave_info->width) >= image->columns) || ((2*shave_info->height) >= image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); SetGeometry(image,&geometry); geometry.width-=2*shave_info->width; geometry.height-=2*shave_info->height; geometry.x=(ssize_t) shave_info->width+image->page.x; geometry.y=(ssize_t) shave_info->height+image->page.y; shave_image=CropImage(image,&geometry,exception); if (shave_image == (Image *) NULL) return((Image *) NULL); shave_image->page.width-=2*shave_info->width; shave_image->page.height-=2*shave_info->height; shave_image->page.x-=(ssize_t) shave_info->width; shave_image->page.y-=(ssize_t) shave_info->height; return(shave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p l i c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SpliceImage() splices a solid color into the image as defined by the % geometry. % % The format of the SpliceImage method is: % % Image *SpliceImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to splice with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
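    Gravity shifts the splice origin by 0, width/2, or width horizontally
    and by 0, height/2, or height vertically, so the inserted band of
    background pixels lands on the requested side of the image.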
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,2) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { const Quantum *magick_restrict p; ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SpliceImageTag,progress, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. 
%
%  This function destroys what it assumes to be a single image list.
%  If the input image is part of a larger list, all other images in that
%  list will simply be 'lost', not destroyed.
%
%  Also, if the crop generates a list of images, only the first image is
%  resized.  Finally, if the crop succeeds but the resize fails, you get a
%  cropped image as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls to
%  CropImageToTiles() or ResizeImage(), as appropriate.
%
%  The format of the TransformImage method is:
%
%      MagickBooleanType TransformImage(Image **image,
%        const char *crop_geometry,const char *image_geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.  The transformed image is returned as this
%      parameter.
%
%    o crop_geometry: A crop geometry string.  This geometry defines a
%      subregion of the image to crop.
%
%    o image_geometry: An image geometry string.  This geometry defines the
%      final size of the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,
  ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;

  RectangleInfo
    geometry;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;

      /*
        Crop image to a user specified size.
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the
%  pixels around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
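%
%  A hedged usage sketch (illustrative only), assuming an existing "image"
%  and "exception":
%
%    Image *transpose = TransposeImage(image,exception);
%    if (transpose != (Image *) NULL)
%      {
%        image=DestroyImage(image);
%        image=transpose;
%      }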
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image, channel); if ((traits == UndefinedPixelTrait) || (transpose_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transpose_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(transpose_image); } if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
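%
%  Note: TransposeImage() mirrors the image across its top-left to
%  bottom-right diagonal, while TransverseImage() mirrors it across the
%  bottom-left to top-right diagonal; both swap image columns for rows.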
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1), 0,1,transverse_image->rows,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } q+=GetPixelChannels(transverse_image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; q-=GetPixelChannels(transverse_image); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image, channel); if ((traits == UndefinedPixelTrait) || (transverse_traits == UndefinedPixelTrait)) continue; SetPixelChannel(transverse_image,channel,p[i],q); } p+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. 
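%
%  TrimImage() locates the bounding box of the non-background edge pixels
%  with GetImageBoundingBox() and crops to it.  Setting the "trim:minSize"
%  artifact bounds the result to a minimum geometry anchored by the image
%  gravity.  A hedged usage sketch, assuming an existing "image" and
%  "exception":
%
%    Image *trimmed = TrimImage(image,exception);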
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  const char
    *artifact;

  Image
    *trim_image;

  RectangleInfo
    geometry,
    page;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width == 0) || (geometry.height == 0))
    {
      Image
        *crop_image;

      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=image->page;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      return(crop_image);
    }
  page=geometry;
  artifact=GetImageArtifact(image,"trim:minSize");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&page);
  if ((geometry.width < page.width) && (geometry.height < page.height))
    {
      /*
        Limit trim to a minimum size: gravity anchors the trimmed content
        within the minimum-size box, so the box grows away from the anchor.
      */
      switch (image->gravity)
      {
        case CenterGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        case NorthWestGravity:
          break;
        case NorthGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          break;
        }
        case NorthEastGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          break;
        }
        case EastGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        case SouthEastGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width);
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case SouthGravity:
        {
          geometry.x-=((ssize_t) page.width-geometry.width)/2;
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case SouthWestGravity:
        {
          geometry.y-=((ssize_t) page.height-geometry.height);
          break;
        }
        case WestGravity:
        {
          geometry.y-=((ssize_t) page.height-geometry.height)/2;
          break;
        }
        default:
          break;
      }
      geometry.width=page.width;
      geometry.height=page.height;
    }
  geometry.x+=image->page.x;
  geometry.y+=image->page.y;
  trim_image=CropImage(image,&geometry,exception);
  if (trim_image != (Image *) NULL)
    Update8BIMClipPath(trim_image,image->columns,image->rows,&geometry);
  return(trim_image);
}
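
/*
  A hedged usage sketch (illustrative, not part of MagickCore): tiling an
  image with CropImageToTiles().  Assumes an initialized MagickCore
  environment; "image" and the geometry string are placeholders.

    ExceptionInfo *exception = AcquireExceptionInfo();
    Image *tiles = CropImageToTiles(image,"256x256",exception);
    Image *tile;
    for (tile=GetFirstImageInList(tiles); tile != (Image *) NULL;
         tile=GetNextImageInList(tile))
      (void) printf("%.20gx%.20g%+.20g%+.20g\n",(double) tile->columns,
        (double) tile->rows,(double) tile->page.x,(double) tile->page.y);
    tiles=DestroyImageList(tiles);
    exception=DestroyExceptionInfo(exception);
*/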
tree_ensemble_common.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include "tree_ensemble_aggregator.h"

namespace onnxruntime {
namespace ml {
namespace detail {

template <typename ITYPE, typename OTYPE>
class TreeEnsembleCommon {
 public:
  int64_t n_targets_or_classes_;

 protected:
  std::vector<OTYPE> base_values_;
  POST_EVAL_TRANSFORM post_transform_;
  AGGREGATE_FUNCTION aggregate_function_;
  int64_t n_nodes_;
  std::vector<TreeNodeElement<OTYPE>> nodes_;
  std::vector<TreeNodeElement<OTYPE>*> roots_;

  int64_t max_tree_depth_;
  int64_t n_trees_;
  bool same_mode_;
  bool has_missing_tracks_;
  int parallel_tree_;  // parallelize the computation when n_trees_ > parallel_tree_ and n_rows == 1
  int parallel_N_;     // parallelize the computation when n_rows > parallel_N_

 public:
  TreeEnsembleCommon(int parallel_tree,
                     int parallel_N,
                     const std::string& aggregate_function,
                     const std::vector<OTYPE>& base_values,
                     int64_t n_targets_or_classes,
                     const std::vector<int64_t>& nodes_falsenodeids,
                     const std::vector<int64_t>& nodes_featureids,
                     const std::vector<OTYPE>& nodes_hitrates,
                     const std::vector<int64_t>& nodes_missing_value_tracks_true,
                     const std::vector<std::string>& nodes_modes,
                     const std::vector<int64_t>& nodes_nodeids,
                     const std::vector<int64_t>& nodes_treeids,
                     const std::vector<int64_t>& nodes_truenodeids,
                     const std::vector<OTYPE>& nodes_values,
                     const std::string& post_transform,
                     const std::vector<int64_t>& target_class_ids,
                     const std::vector<int64_t>& target_class_nodeids,
                     const std::vector<int64_t>& target_class_treeids,
                     const std::vector<OTYPE>& target_class_weights);

  void compute(const Tensor* X, Tensor* Z, Tensor* label) const;

 protected:
  TreeNodeElement<OTYPE>* ProcessTreeNodeLeave(
      TreeNodeElement<OTYPE>* root, const ITYPE* x_data) const;

  template <typename AGG>
  void compute_agg(const Tensor* X, Tensor* Z, Tensor* label, const AGG& agg) const;
};

template <typename ITYPE, typename OTYPE>
TreeEnsembleCommon<ITYPE, OTYPE>::TreeEnsembleCommon(int parallel_tree,
                                                     int parallel_N,
                                                     const std::string& aggregate_function,
                                                     const std::vector<OTYPE>& base_values,
                                                     int64_t n_targets_or_classes,
                                                     const std::vector<int64_t>& nodes_falsenodeids,
                                                     const std::vector<int64_t>& nodes_featureids,
                                                     const std::vector<OTYPE>& nodes_hitrates,
                                                     const std::vector<int64_t>& nodes_missing_value_tracks_true,
                                                     const std::vector<std::string>& nodes_modes,
                                                     const std::vector<int64_t>& nodes_nodeids,
                                                     const std::vector<int64_t>& nodes_treeids,
                                                     const std::vector<int64_t>& nodes_truenodeids,
                                                     const std::vector<OTYPE>& nodes_values,
                                                     const std::string& post_transform,
                                                     const std::vector<int64_t>& target_class_ids,
                                                     const std::vector<int64_t>& target_class_nodeids,
                                                     const std::vector<int64_t>& target_class_treeids,
                                                     const std::vector<OTYPE>& target_class_weights) {
  parallel_tree_ = parallel_tree;
  parallel_N_ = parallel_N;

  ORT_ENFORCE(n_targets_or_classes > 0);
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_featureids.size());
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_modes.size());
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_nodeids.size());
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_treeids.size());
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_truenodeids.size());
  ORT_ENFORCE(nodes_falsenodeids.size() == nodes_values.size());
  ORT_ENFORCE(target_class_ids.size() == target_class_nodeids.size());
  ORT_ENFORCE(target_class_ids.size() == target_class_treeids.size());
  ORT_ENFORCE(target_class_ids.size() == target_class_weights.size());

  aggregate_function_ = MakeAggregateFunction(aggregate_function);
  post_transform_ = MakeTransform(post_transform);
  base_values_ = base_values;
  n_targets_or_classes_ = n_targets_or_classes;
  max_tree_depth_ = 1000;

  // additional members
  std::vector<NODE_MODE> cmodes(nodes_modes.size());
  same_mode_ = true;
  int fpos = -1;
  for (size_t i = 0; i < nodes_modes.size(); ++i) {
    cmodes[i] = MakeTreeNodeMode(nodes_modes[i]);
    if (cmodes[i] == NODE_MODE::LEAF)
      continue;
    if (fpos == -1) {
      fpos = static_cast<int>(i);
      continue;
    }
    if (cmodes[i] != cmodes[fpos])
      same_mode_ = false;
  }

  // filling nodes
  n_nodes_ = nodes_treeids.size();
  nodes_.resize(n_nodes_);
  roots_.clear();
  std::map<TreeNodeElementId, TreeNodeElement<OTYPE>*> idi;
  size_t i;

  for (i = 0; i < nodes_treeids.size(); ++i) {
    TreeNodeElement<OTYPE>& node = nodes_[i];
    node.id.tree_id = static_cast<int>(nodes_treeids[i]);
    node.id.node_id = static_cast<int>(nodes_nodeids[i]);
    node.feature_id = static_cast<int>(nodes_featureids[i]);
    node.value = nodes_values[i];
    node.hitrates = i < nodes_hitrates.size() ? nodes_hitrates[i] : -1;
    node.mode = cmodes[i];
    node.is_not_leaf = node.mode != NODE_MODE::LEAF;
    node.truenode = NULL;   // resolved below from nodes_truenodeids[i]
    node.falsenode = NULL;  // resolved below from nodes_falsenodeids[i]
    node.missing_tracks = i < static_cast<size_t>(nodes_missing_value_tracks_true.size())
                              ? (nodes_missing_value_tracks_true[i] == 1
                                     ? MissingTrack::TRUE
                                     : MissingTrack::FALSE)
                              : MissingTrack::NONE;
    node.is_missing_track_true = node.missing_tracks == MissingTrack::TRUE;
    if (idi.find(node.id) != idi.end()) {
      ORT_THROW("Node ", node.id.node_id, " in tree ", node.id.tree_id, " is already there.");
    }
    idi.insert(std::pair<TreeNodeElementId, TreeNodeElement<OTYPE>*>(node.id, &node));
  }

  TreeNodeElementId coor;
  for (auto it = nodes_.begin(); it != nodes_.end(); ++it) {
    if (!it->is_not_leaf)
      continue;
    i = std::distance(nodes_.begin(), it);
    coor.tree_id = it->id.tree_id;
    coor.node_id = static_cast<int>(nodes_truenodeids[i]);
    auto found = idi.find(coor);
    if (found == idi.end()) {
      ORT_THROW("Unable to find node ", coor.tree_id, "-", coor.node_id, " (truenode).");
    }
    if (coor.node_id >= 0 && coor.node_id < n_nodes_) {
      it->truenode = found->second;
      if ((it->truenode->id.tree_id != it->id.tree_id) ||
          (it->truenode->id.node_id == it->id.node_id)) {
        ORT_THROW("A truenode is pointing either to itself or to another tree.");
      }
    } else
      it->truenode = NULL;

    coor.node_id = static_cast<int>(nodes_falsenodeids[i]);
    found = idi.find(coor);
    if (found == idi.end()) {
      ORT_THROW("Unable to find node ", coor.tree_id, "-", coor.node_id, " (falsenode).");
    }
    if (coor.node_id >= 0 && coor.node_id < n_nodes_) {
      it->falsenode = found->second;
      if ((it->falsenode->id.tree_id != it->id.tree_id) ||
          (it->falsenode->id.node_id == it->id.node_id)) {
        ORT_THROW("A falsenode is pointing either to itself or to another tree.");
      }
    } else
      it->falsenode = NULL;
  }

  int64_t previous = -1;
  for (i = 0; i < static_cast<size_t>(n_nodes_); ++i) {
    if ((previous == -1) || (previous != nodes_[i].id.tree_id))
      roots_.push_back(&(nodes_[i]));
    previous = nodes_[i].id.tree_id;
  }

  TreeNodeElementId ind;
  SparseValue<OTYPE> w;
  for (i = 0; i < target_class_nodeids.size(); i++) {
    ind.tree_id = static_cast<int>(target_class_treeids[i]);
    ind.node_id = static_cast<int>(target_class_nodeids[i]);
    if (idi.find(ind) == idi.end()) {
      ORT_THROW("Unable to find node ", ind.tree_id, "-", ind.node_id, " (weights).");
    }
    w.i = target_class_ids[i];
    w.value = target_class_weights[i];
    idi[ind]->weights.push_back(w);
  }

  n_trees_ = roots_.size();
  has_missing_tracks_ = false;
  for (auto itm =
nodes_missing_value_tracks_true.begin(); itm != nodes_missing_value_tracks_true.end(); ++itm) { if (*itm) { has_missing_tracks_ = true; break; } } } template <typename ITYPE, typename OTYPE> void TreeEnsembleCommon<ITYPE, OTYPE>::compute(const Tensor* X, Tensor* Z, Tensor* label) const { switch (aggregate_function_) { case AGGREGATE_FUNCTION::AVERAGE: compute_agg( X, Z, label, TreeAggregatorAverage<ITYPE, OTYPE>( roots_.size(), n_targets_or_classes_, post_transform_, base_values_)); return; case AGGREGATE_FUNCTION::SUM: compute_agg( X, Z, label, TreeAggregatorSum<ITYPE, OTYPE>( roots_.size(), n_targets_or_classes_, post_transform_, base_values_)); return; case AGGREGATE_FUNCTION::MIN: compute_agg( X, Z, label, TreeAggregatorMin<ITYPE, OTYPE>( roots_.size(), n_targets_or_classes_, post_transform_, base_values_)); return; case AGGREGATE_FUNCTION::MAX: compute_agg( X, Z, label, TreeAggregatorMax<ITYPE, OTYPE>( roots_.size(), n_targets_or_classes_, post_transform_, base_values_)); return; default: ORT_THROW("Unknown aggregation function in TreeEnsemble."); } } template <typename ITYPE, typename OTYPE> template <typename AGG> void TreeEnsembleCommon<ITYPE, OTYPE>::compute_agg(const Tensor* X, Tensor* Z, Tensor* label, const AGG& agg) const { int64_t stride = X->Shape().NumDimensions() == 1 ? X->Shape()[0] : X->Shape()[1]; int64_t N = X->Shape().NumDimensions() == 1 ? 1 : X->Shape()[0]; const ITYPE* x_data = X->template Data<ITYPE>(); OTYPE* z_data = Z->template MutableData<OTYPE>(); int64_t* label_data = label == NULL ? NULL : label->template MutableData<int64_t>(); if (n_targets_or_classes_ == 1) { if (N == 1) { ScoreValue<OTYPE> score = {0, 0}; if (n_trees_ <= parallel_tree_) { for (int64_t j = 0; j < n_trees_; ++j) agg.ProcessTreeNodePrediction1(score, *ProcessTreeNodeLeave(roots_[j], x_data)); } else { std::vector<ScoreValue<OTYPE>> scores_t(n_trees_, {0, 0}); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t j = 0; j < n_trees_; ++j) { agg.ProcessTreeNodePrediction1(scores_t[j], *ProcessTreeNodeLeave(roots_[j], x_data)); } for (auto it = scores_t.cbegin(); it != scores_t.cend(); ++it) agg.MergePrediction1(score, *it); } agg.FinalizeScores1(z_data, score, label_data); } else { if (N <= parallel_N_) { ScoreValue<OTYPE> score; size_t j; for (int64_t i = 0; i < N; ++i) { score = {0, 0}; for (j = 0; j < static_cast<size_t>(n_trees_); ++j) agg.ProcessTreeNodePrediction1(score, *ProcessTreeNodeLeave(roots_[j], x_data + i * stride)); agg.FinalizeScores1(z_data + i * n_targets_or_classes_, score, label_data == NULL ? NULL : (label_data + i)); } } else { #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t i = 0; i < N; ++i) { ScoreValue<OTYPE> score = {0, 0}; for (size_t j = 0; j < static_cast<size_t>(n_trees_); ++j) agg.ProcessTreeNodePrediction1(score, *ProcessTreeNodeLeave(roots_[j], x_data + i * stride)); agg.FinalizeScores1(z_data + i * n_targets_or_classes_, score, label_data == NULL ? 
NULL : (label_data + i)); } } } } else { if (N == 1) { std::vector<ScoreValue<OTYPE>> scores(n_targets_or_classes_, {0, 0}); if (n_trees_ <= parallel_tree_) { for (int64_t j = 0; j < n_trees_; ++j) agg.ProcessTreeNodePrediction(scores, *ProcessTreeNodeLeave(roots_[j], x_data)); agg.FinalizeScores(scores, z_data, -1, label_data); } else { #ifdef _OPENMP #pragma omp parallel #endif { std::vector<ScoreValue<OTYPE>> private_scores(n_targets_or_classes_, {0, 0}); #ifdef _OPENMP #pragma omp for #endif for (int64_t j = 0; j < n_trees_; ++j) { agg.ProcessTreeNodePrediction(private_scores, *ProcessTreeNodeLeave(roots_[j], x_data)); } #ifdef _OPENMP #pragma omp critical #endif agg.MergePrediction(scores, private_scores); } agg.FinalizeScores(scores, z_data, -1, label_data); } } else { if (N <= parallel_N_) { std::vector<ScoreValue<OTYPE>> scores(n_targets_or_classes_); size_t j; for (int64_t i = 0; i < N; ++i) { std::fill(scores.begin(), scores.end(), ScoreValue<OTYPE>({0, 0})); for (j = 0; j < roots_.size(); ++j) agg.ProcessTreeNodePrediction(scores, *ProcessTreeNodeLeave(roots_[j], x_data + i * stride)); agg.FinalizeScores(scores, z_data + i * n_targets_or_classes_, -1, label_data == NULL ? NULL : (label_data + i)); ORT_ENFORCE((int64_t)scores.size() == n_targets_or_classes_); } } else { #ifdef _OPENMP #pragma omp parallel #endif { std::vector<ScoreValue<OTYPE>> scores(n_targets_or_classes_); size_t j; #ifdef _OPENMP #pragma omp for #endif for (int64_t i = 0; i < N; ++i) { std::fill(scores.begin(), scores.end(), ScoreValue<OTYPE>({0, 0})); for (j = 0; j < roots_.size(); ++j) agg.ProcessTreeNodePrediction(scores, *ProcessTreeNodeLeave(roots_[j], x_data + i * stride)); agg.FinalizeScores(scores, z_data + i * n_targets_or_classes_, -1, label_data == NULL ? NULL : (label_data + i)); } } } } } } #define TREE_FIND_VALUE(CMP) \ if (has_missing_tracks_) { \ while (root->is_not_leaf) { \ val = x_data[root->feature_id]; \ root = (val CMP root->value || \ (root->is_missing_track_true && _isnan_(val))) \ ? root->truenode \ : root->falsenode; \ } \ } else { \ while (root->is_not_leaf) { \ val = x_data[root->feature_id]; \ root = val CMP root->value ? root->truenode : root->falsenode; \ } \ } inline bool _isnan_(float x) { return std::isnan(x); } inline bool _isnan_(double x) { return std::isnan(x); } inline bool _isnan_(int64_t) { return false; } inline bool _isnan_(int32_t) { return false; } template <typename ITYPE, typename OTYPE> TreeNodeElement<OTYPE>* TreeEnsembleCommon<ITYPE, OTYPE>::ProcessTreeNodeLeave( TreeNodeElement<OTYPE>* root, const ITYPE* x_data) const { ITYPE val; if (same_mode_) { switch (root->mode) { case NODE_MODE::BRANCH_LEQ: if (has_missing_tracks_) { while (root->is_not_leaf) { val = x_data[root->feature_id]; root = (val <= root->value || (root->is_missing_track_true && _isnan_(val))) ? root->truenode : root->falsenode; } } else { while (root->is_not_leaf) { val = x_data[root->feature_id]; root = val <= root->value ? root->truenode : root->falsenode; } } break; case NODE_MODE::BRANCH_LT: TREE_FIND_VALUE(<) break; case NODE_MODE::BRANCH_GTE: TREE_FIND_VALUE(>=) break; case NODE_MODE::BRANCH_GT: TREE_FIND_VALUE(>) break; case NODE_MODE::BRANCH_EQ: TREE_FIND_VALUE(==) break; case NODE_MODE::BRANCH_NEQ: TREE_FIND_VALUE(!=) break; case NODE_MODE::LEAF: break; } } else { // Different rules to compare to node thresholds. 
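    // With mixed node modes the comparison operator is resolved per node.
    // A NaN feature value follows the true branch only when the node's
    // missing-value track is true, mirroring the same_mode_ fast paths above.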
OTYPE threshold; while (root->is_not_leaf) { val = x_data[root->feature_id]; threshold = root->value; switch (root->mode) { case NODE_MODE::BRANCH_LEQ: root = val <= threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::BRANCH_LT: root = val < threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::BRANCH_GTE: root = val >= threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::BRANCH_GT: root = val > threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::BRANCH_EQ: root = val == threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::BRANCH_NEQ: root = val != threshold || (root->is_missing_track_true && _isnan_(val)) ? root->truenode : root->falsenode; break; case NODE_MODE::LEAF: break; } } } return root; } template <typename ITYPE, typename OTYPE> class TreeEnsembleCommonClassifier : TreeEnsembleCommon<ITYPE, OTYPE> { private: bool weights_are_all_positive_; bool binary_case_; std::vector<std::string> classlabels_strings_; std::vector<int64_t> classlabels_int64s_; std::vector<int64_t> class_labels_; public: TreeEnsembleCommonClassifier(int parallel_tree, int parallel_N, const std::string& aggregate_function, const std::vector<OTYPE>& base_values, const std::vector<int64_t>& nodes_falsenodeids, const std::vector<int64_t>& nodes_featureids, const std::vector<OTYPE>& nodes_hitrates, const std::vector<int64_t>& nodes_missing_value_tracks_true, const std::vector<std::string>& nodes_modes, const std::vector<int64_t>& nodes_nodeids, const std::vector<int64_t>& nodes_treeids, const std::vector<int64_t>& nodes_truenodeids, const std::vector<OTYPE>& nodes_values, const std::string& post_transform, const std::vector<int64_t>& class_ids, const std::vector<int64_t>& class_nodeids, const std::vector<int64_t>& class_treeids, const std::vector<OTYPE>& class_weights, const std::vector<std::string>& classlabels_strings, const std::vector<int64_t>& classlabels_int64s); int64_t get_class_count() const { return this->n_targets_or_classes_; } void compute(const Tensor* X, Tensor* Z, Tensor* label) const; }; template <typename ITYPE, typename OTYPE> TreeEnsembleCommonClassifier<ITYPE, OTYPE>::TreeEnsembleCommonClassifier( int parallel_tree, int parallel_N, const std::string& aggregate_function, const std::vector<OTYPE>& base_values, const std::vector<int64_t>& nodes_falsenodeids, const std::vector<int64_t>& nodes_featureids, const std::vector<OTYPE>& nodes_hitrates, const std::vector<int64_t>& nodes_missing_value_tracks_true, const std::vector<std::string>& nodes_modes, const std::vector<int64_t>& nodes_nodeids, const std::vector<int64_t>& nodes_treeids, const std::vector<int64_t>& nodes_truenodeids, const std::vector<OTYPE>& nodes_values, const std::string& post_transform, const std::vector<int64_t>& class_ids, const std::vector<int64_t>& class_nodeids, const std::vector<int64_t>& class_treeids, const std::vector<OTYPE>& class_weights, const std::vector<std::string>& classlabels_strings, const std::vector<int64_t>& classlabels_int64s) : TreeEnsembleCommon<ITYPE, OTYPE>(parallel_tree, parallel_N, aggregate_function, base_values, classlabels_strings.size() == 0 ? 
classlabels_int64s.size() : classlabels_strings.size(), nodes_falsenodeids, nodes_featureids, nodes_hitrates, nodes_missing_value_tracks_true, nodes_modes, nodes_nodeids, nodes_treeids, nodes_truenodeids, nodes_values, post_transform, class_ids, class_nodeids, class_treeids, class_weights) { classlabels_strings_ = classlabels_strings; classlabels_int64s_ = classlabels_int64s; std::set<int64_t> weights_classes; weights_are_all_positive_ = true; for (size_t i = 0, end = class_ids.size(); i < end; ++i) { weights_classes.insert(class_ids[i]); if (weights_are_all_positive_ && class_weights[i] < 0) weights_are_all_positive_ = false; } binary_case_ = this->n_targets_or_classes_ == 2 && weights_classes.size() == 1; if (classlabels_strings_.size() > 0) { class_labels_.resize(classlabels_strings_.size()); for (size_t i = 0; i < classlabels_strings_.size(); ++i) class_labels_[i] = i; } } template <typename ITYPE, typename OTYPE> void TreeEnsembleCommonClassifier<ITYPE, OTYPE>::compute(const Tensor* X, Tensor* Z, Tensor* label) const { if (classlabels_strings_.size() == 0) { this->compute_agg( X, Z, label, TreeAggregatorClassifier<ITYPE, OTYPE>( this->roots_.size(), this->n_targets_or_classes_, this->post_transform_, this->base_values_, classlabels_int64s_, binary_case_, weights_are_all_positive_)); } else { int64_t N = X->Shape().NumDimensions() == 1 ? 1 : X->Shape()[0]; std::shared_ptr<IAllocator> allocator = std::make_shared<CPUAllocator>(); Tensor label_int64(DataTypeImpl::GetType<int64_t>(), TensorShape({N}), allocator); this->compute_agg( X, Z, &label_int64, TreeAggregatorClassifier<ITYPE, OTYPE>( this->roots_.size(), this->n_targets_or_classes_, this->post_transform_, this->base_values_, class_labels_, binary_case_, weights_are_all_positive_)); const int64_t* plabel = label_int64.template Data<int64_t>(); std::string* labels = label->template MutableData<std::string>(); for (size_t i = 0; i < (size_t)N; ++i) labels[i] = classlabels_strings_[plabel[i]]; } } } // namespace detail } // namespace ml } // namespace onnxruntime
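/* A minimal standalone sketch (illustrative only, not onnxruntime code) of the branch rule that TREE_FIND_VALUE and ProcessTreeNodeLeave implement above: at each non-leaf node the feature value is compared against the node threshold, and a NaN input follows the true branch only when the node's missing_value_tracks_true flag is set. The demo_node layout and all names below are assumptions made for illustration, not the TreeNodeElement structure. */
#include <math.h>

struct demo_node {
  int is_leaf;              /* nonzero for leaf nodes */
  int missing_tracks_true;  /* per-node missing_value_tracks_true flag */
  int feature_id;           /* index into the feature row */
  double threshold;         /* node comparison value */
  const struct demo_node *truenode;
  const struct demo_node *falsenode;
  double leaf_value;        /* payload returned at the leaf */
};

/* BRANCH_LEQ traversal: val <= threshold, or NaN with the missing-value
   flag set, follows the true branch; everything else the false branch. */
static double demo_traverse_leq(const struct demo_node *n, const double *x) {
  while (!n->is_leaf) {
    double val = x[n->feature_id];
    n = (val <= n->threshold || (n->missing_tracks_true && isnan(val)))
            ? n->truenode
            : n->falsenode;
  }
  return n->leaf_value;
}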
shuffle.c
// TODO: see http://arxiv.org/pdf/1508.03167.pdf // https://github.com/axel-bacher/mergeshuffle #include <stdlib.h> #include <stdio.h> #include <limits.h> #include <string.h> #include <sys/types.h> #include <stdint.h> #include <x86intrin.h> #define RDTSC_START(cycles) \ do { \ register unsigned cyc_high, cyc_low; \ __asm volatile("cpuid\n\t" \ "rdtsc\n\t" \ "mov %%edx, %0\n\t" \ "mov %%eax, %1\n\t" \ : "=r" (cyc_high), "=r" (cyc_low) \ :: "%rax", "%rbx", "%rcx", "%rdx"); \ (cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \ } while (0) #define RDTSC_FINAL(cycles) \ do { \ register unsigned cyc_high, cyc_low; \ __asm volatile("rdtscp\n\t" \ "mov %%edx, %0\n\t" \ "mov %%eax, %1\n\t" \ "cpuid\n\t" \ : "=r" (cyc_high), "=r" (cyc_low) \ :: "%rax", "%rbx", "%rcx", "%rdx"); \ (cycles) = ((uint64_t)cyc_high << 32) | cyc_low; \ } while(0) /******** * Next part is mersenne Twister *********/ #include "mersenne.c" /************** * next bit is from * https://github.com/axel-bacher/mergeshuffle/ ****/ struct random { unsigned long x; int c; }; // mark as used the first b bits of r->x // they should be shifted out after use (r->x >>= b) static inline void consume_bits(struct random *r, int b) { #ifdef COUNT count += b; #endif r->c -= b; if(r->c < 0) { r->x = rand64(); r->c = 64 - b; } } static inline unsigned long random_bits(struct random *r, unsigned int i) { consume_bits(r, i); int y = r->x & ((1UL << i) - 1); r->x >>= i; return y; } static inline int flip(struct random *r) { return random_bits(r, 1); } static inline unsigned long random_int(struct random *r, unsigned long n) { unsigned long v = 1; unsigned long d = 0; while(1) { d += d + flip(r); v += v; if(v >= n) { if(d < n) return d; v -= n; d -= n; } } } static inline void swap(unsigned int *a, unsigned int *b) { unsigned int x = *a; unsigned int y = *b; *a = y; *b = x; } void fisher_yates(unsigned int *t, unsigned int n) { struct random r = {0, 0}; unsigned int i; for(i = 0; i < n; i ++) { unsigned int j = random_int(&r, i + 1); swap(t + i, t + j); } } void merge(unsigned int *t, unsigned int m, unsigned int n) { unsigned int *u = t; unsigned int *v = t + m; unsigned int *w = t + n; struct random r = {0, 0}; // take elements from both halves until one is exhausted while(1) { if(flip(&r)) { if(v == w) break; swap(u, v ++); } else if(u == v) break; u ++; } // finish using Fisher-Yates while(u < w) { unsigned int i = random_int(&r, u - t + 1); swap(t + i, u ++); } } unsigned long cutoff = 0x10000; void mergeshuffle(unsigned int *t, unsigned int n) { unsigned int c = 0; while((n >> c) > cutoff) c ++; unsigned int q = 1 << c; unsigned long nn = n; unsigned int i,p; // #pragma omp parallel for for(i = 0; i < q; i ++) { unsigned long j = nn * i >> c; unsigned long k = nn * (i+1) >> c; fisher_yates(t + j, k - j); } for(p = 1; p < q; p += p) { // #pragma omp parallel for for(i = 0; i < q; i += 2*p) { unsigned long j = nn * i >> c; unsigned long k = nn * (i + p) >> c; unsigned long l = nn * (i + 2*p) >> c; merge(t + j, k - j, l - j); } } } /**** * End of import from * https://github.com/axel-bacher/mergeshuffle/ **************/ uint32_t round2 (uint32_t v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; v++; return v; } uint32_t fairRandomInt(uint32_t size) { uint32_t candidate, rkey; // such a loop is necessary for the result to be fair rkey = fastrand(); candidate = rkey % size; // NOTE: RAND_MAX should be actual max value while(rkey - candidate > RAND_MAX - size + 1 ) { // will be predicted as false rkey = fastrand(); 
candidate = rkey % size; } return candidate; } // Fisher-Yates shuffle, shuffling an array of integers void shuffle(int *storage, size_t size) { size_t i; for (i=size; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } uint32_t fastround2 (uint32_t v) { return 1 << (32 - __builtin_clz(v-1)); } uint32_t fastFairRandomInt(randbuf_t * rb, uint32_t size, uint32_t mask, uint32_t bused) { uint32_t candidate; candidate = grabBits( rb, mask,bused ); // such a loop is necessary for the result to be fair while(candidate >= size) { candidate = grabBits( rb, mask,bused ); } return candidate; } uint32_t ranged_random_mult_lazy(uint32_t range) { uint64_t random32bit, multiresult; uint32_t leftover; uint32_t threshold; random32bit = fastrand(); if((range & (range - 1)) == 0) { return random32bit & (range - 1); } if(range >0x80000000) {// if range > 1<<31 while(random32bit >= range) { random32bit = fastrand(); } return random32bit; // [0, range) } multiresult = random32bit * range; leftover = (uint32_t) multiresult; if(leftover < range ) { threshold = 0xFFFFFFFF % range ; while (leftover <= threshold) { random32bit = fastrand(); multiresult = random32bit * range; leftover = (uint32_t) multiresult; } } return multiresult >> 32; // [0, range) } // Fisher-Yates shuffle, shuffling an array of integers void shuffle_fastmult(int *storage, uint32_t size) { uint32_t i; for (i=size; i>1; i--) { uint32_t nextpos = ranged_random_mult_lazy(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } // Fisher-Yates shuffle, shuffling an array of integers void oldfast_shuffle(int *storage, size_t size) { size_t i; uint32_t bused = 32 - __builtin_clz(size); uint32_t m2 = 1 << (32- __builtin_clz(size-1)); i=size; randbuf_t rb; rbinit(&rb); while(i>1) { for (; 2*i>m2; i--) { size_t nextpos = fastFairRandomInt(&rb, i, m2-1,bused);// int tmp = storage[i - 1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } m2 = m2 >> 1; bused--; } } void fast_shuffle(int *storage, size_t size) { size_t i; i=size; while(i>1) { uint32_t nextpos = standard_ranged(i);// int tmp = storage[i - 1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later i--; } } void fast_shuffle2(int *storage, size_t size) { size_t i; i=size; while(i>1) { uint32_t nextpos = standard_ranged2(i);// int tmp = storage[i - 1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later i--; } } // Fisher-Yates shuffle, shuffling an array of integers void shuffle_float(int *storage, uint32_t size) { uint32_t i; for (i=size; i>1; i--) { uint32_t nextpos = (uint32_t)(fastranddouble() * i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } // Fisher-Yates shuffle, shuffling an array of integers void fast_shuffle_floatapprox(int *storage, size_t size) { /** supposedly, you can map a 64-bit random int v to a double by doing this: v * 
(1.0/18446744073709551616.0L); so to get a number between 0 and x, you just multiply this by x? But this is bogus. * */ size_t i; for(i=size; i>1; i--) { uint32_t nextpos = (uint32_t)(fastrand64() * i * (1.0/18446744073709551616.0L));// int tmp = storage[i - 1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } // Random Permutations on Distributed, External and Hierarchical Memory by Peter Sanders void shuffle_sanders(int *storage, size_t size, size_t buffersize) { size_t i; size_t l; size_t NBR_BLOCK = round2((size +buffersize -1)/buffersize); size_t BLOCK_SIZE = 4 * buffersize; size_t * counter = malloc(NBR_BLOCK * sizeof(size_t)); for(i = 0 ; i < NBR_BLOCK; ++i) counter[i] = 0; int* buffer = malloc(BLOCK_SIZE * NBR_BLOCK * sizeof(int)); for(i = 0; i < size; i++) { int block = rand() & ( NBR_BLOCK - 1) ; // NBR_BLOCK is a power of two buffer[BLOCK_SIZE * block + counter[block]] = storage[i]; counter[block]++; if(counter[block]>=BLOCK_SIZE) printf("insufficient mem\n"); } l = 0; for(i = 0; i < NBR_BLOCK; i++) { shuffle(buffer + BLOCK_SIZE * i, counter[i]); memcpy(storage + l,buffer + BLOCK_SIZE * i,counter[i]*sizeof(int)); l += counter[i]; } free(buffer); free(counter); } //Fisher-Yates shuffle, shuffling an array of integers void shuffle_prefetch2(int *storage, size_t size) { size_t i; i = size; if(size > 2) { size_t nextposs[2]; nextposs[(i)%2] = fairRandomInt(i); nextposs[(i-1)%2] = fairRandomInt(i - 1); for (; i>2; i--) { int thisindex = i % 2; size_t nextpos = nextposs[thisindex]; // prefetching part size_t np = fairRandomInt(i-2); nextposs[thisindex] = np; __builtin_prefetch(storage + np); // end of prefetching int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } for (; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } //Fisher-Yates shuffle, shuffling an array of integers void shuffle_prefetch4(int *storage, size_t size) { size_t i; i = size; if(size > 4) { size_t nextposs[4]; nextposs[i%4] = fairRandomInt(i); nextposs[(i-1)%4] = fairRandomInt(i - 1); nextposs[(i-2)%4] = fairRandomInt(i - 2); nextposs[(i-3)%4] = fairRandomInt(i - 3); for (; i>4; i--) { int thisindex = i % 4; size_t nextpos = nextposs[thisindex]; // prefetching part size_t np = fairRandomInt(i-4); nextposs[thisindex] = np; __builtin_prefetch(storage + np); // end of prefetching int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } for (; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } //Fisher-Yates shuffle, shuffling an array of integers void shuffle4(int *storage, size_t size) { size_t i; i = size; if(size > 4) { size_t nextposs[4]; int vals[4]; for (; i>4; i-=4) { nextposs[0] = fairRandomInt(i); nextposs[1] = fairRandomInt(i - 1); nextposs[2] = fairRandomInt(i - 2); nextposs[3] = fairRandomInt(i - 3); vals[0] = storage[nextposs[0]]; vals[1] = storage[nextposs[1]]; vals[2] = storage[nextposs[2]];
vals[3] = storage[nextposs[3]]; storage[nextposs[0]] = storage[i - 1]; storage[nextposs[1]] = storage[i - 2]; storage[nextposs[2]] = storage[i - 3]; storage[nextposs[3]] = storage[i - 4]; storage[i - 1] = vals[0]; storage[i - 2] = vals[1]; storage[i - 3] = vals[2]; storage[i - 4] = vals[3]; } } for (; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } //Fisher-Yates shuffle, shuffling an array of integers void shuffle_prefetch8(int *storage, size_t size) { size_t i,j ; i = size; if(size > 8) { size_t nextposs[8]; for(j = 0; j < 8; ++j) nextposs[(i-j)%8] = fairRandomInt(i-j); for (; i>8; i--) { int thisindex = i % 8; size_t nextpos = nextposs[thisindex]; // prefetching part size_t np = fairRandomInt(i-8); nextposs[thisindex] = np; __builtin_prefetch(storage + np); // end of prefetching int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } for (; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } //Fisher-Yates shuffle, shuffling an array of integers void shuffle_prefetch16(int *storage, size_t size) { size_t i,j; i = size; if(size > 16) { size_t nextposs[16]; for(j = 0; j < 16; ++j) nextposs[(i-j)%16] = fairRandomInt(i-j); for (; i>16; i--) { int thisindex = i % 16; size_t nextpos = nextposs[thisindex]; // prefetching part size_t np = fairRandomInt(i-16); nextposs[thisindex] = np; __builtin_prefetch(storage + np); // end of prefetching int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } for (; i>1; i--) { size_t nextpos = fairRandomInt(i); int tmp = storage[i-1];// likely in cache int val = storage[nextpos]; // could be costly storage[i - 1] = val; storage[nextpos] = tmp; // you might have to read this store later } } // Random Permutations on Distributed, External and Hierarchical Memory by Peter Sanders void shuffle_sanders_prefetch16(int *storage, size_t size, size_t buffersize) { size_t i; size_t l; size_t NBR_BLOCK = round2((size +buffersize -1)/buffersize); size_t BLOCK_SIZE = 4 * buffersize; size_t * counter = malloc(NBR_BLOCK * sizeof(size_t)); for(i = 0 ; i < NBR_BLOCK; ++i) counter[i] = 0; int* buffer = malloc(BLOCK_SIZE * NBR_BLOCK * sizeof(int)); for(i = 0; i < size; i++) { int block = rand() & ( NBR_BLOCK - 1) ; // NBR_BLOCK is a power of two buffer[BLOCK_SIZE * block + counter[block]] = storage[i]; counter[block]++; if(counter[block]>=BLOCK_SIZE) printf("insufficient mem\n"); } l = 0; for(i = 0; i < NBR_BLOCK; i++) { shuffle_prefetch16(buffer + BLOCK_SIZE * i, counter[i]); memcpy(storage + l,buffer + BLOCK_SIZE * i,counter[i]*sizeof(int)); l += counter[i]; } free(buffer); free(counter); } int compare( const void* a, const void* b) { int int_a = * ( (int*) a ); int int_b = * ( (int*) b ); if ( int_a == int_b ) return 0; else if ( int_a < int_b ) return -1; else return 1; } int demo(size_t array_size) { int bogus = 0; size_t i; float cycles_per_search1; int *array = (int *) malloc( array_size * sizeof(int) ); uint64_t cycles_start, cycles_final; #ifdef USE_GENERIC
printf("Using some basic random number generator\n"); #elif USE_SIMDMT printf("Using SIMD Mersenne Twister\n"); #elif USE_MT printf("Using Mersenne Twister\n"); #elif USE_PCG printf("Using PCG\n"); #elif USE_RAND printf("Using rand\n"); #elif USE_HARDWARE printf("Using hardware\n"); #else printf("Using SIMD Mersenne Twister\n"); #endif printf("populating array %zu \n",array_size); printf("log(array_size) = %d \n",(33 - __builtin_clz(array_size-1))); for(i = 0; i < array_size; ++i) { array[i] = i; } x=1; seedMT(x); printf("\n"); RDTSC_START(cycles_start); shuffle( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("normal shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_float( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("normal shuffle with floating points cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); fast_shuffle( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("fast shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); fast_shuffle2( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("fast shuffle 2 cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_fastmult( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("fast mult shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); fisher_yates((unsigned int *) array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("from https://github.com/axel-bacher/mergeshuffle/ shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); mergeshuffle((unsigned int *) array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("from https://github.com/axel-bacher/mergeshuffle/ mergeshuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle4( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("shuffle4 cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_sanders( array, array_size, 24000 ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("sanders shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_prefetch2( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("prefetch 2 shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_prefetch4( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("prefetch 4 shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_prefetch8( array, array_size ); bogus 
+= array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("prefetch 8 shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_prefetch16( array, array_size ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("prefetch 16 shuffle cycles per key %.2f \n", cycles_per_search1); RDTSC_START(cycles_start); shuffle_sanders_prefetch16( array, array_size, 24000 ); bogus += array[0]; RDTSC_FINAL(cycles_final); cycles_per_search1 = ( cycles_final - cycles_start) / (float) (array_size); printf("sanders with prefetch 16 shuffle cycles per key %.2f \n", cycles_per_search1); free(array); return bogus; } int testfairness(uint32_t maxsize) { printf("checking your RNG.\n"); uint32_t size; for(size = 100; size <maxsize; ++size) { uint32_t i; uint32_t m2 = 1 << (32- __builtin_clz(size-1)); double ratio = (double) size / m2; uint32_t mask = m2 -1; uint32_t count = 10000; double predicted = (1-ratio) * count; uint32_t missed = 0; for(i = 0 ; i < count; ++i ) { if((fastrand() & mask) >= size) ++missed; } if((double)missed > 1.1 * predicted + 20) { printf("size = %d missed = %d predicted = %f\n",size, missed, predicted); printf("poor RNG \n"); return -1; } } return 0; } int main( int argc, char **argv ) { int bogus = 0; size_t array_size; int r; init_gen_rand(0); // simd mersenne r = testfairness(1000); if(r == 0) printf ("good RNG\n"); else return r; for(array_size = 4096; array_size < 2147483648; array_size*=8) { bogus += demo(array_size); printf("\n"); } return bogus; }
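/* A minimal standalone sketch of the multiply-then-reject technique behind ranged_random_mult_lazy above: multiplying a 32-bit random word by range and keeping the high 32 bits maps [0,2^32) onto [0,range); the low 32 bits ("leftover") identify the few words that must be redrawn to remove the bias. Written here with the equivalent rejection threshold (uint32_t)(-range) % range, i.e. 2^32 mod range, which also covers power-of-two ranges (threshold 0). Assumes range > 0; the rng function pointer is a hypothetical stand-in for a 32-bit generator such as fastrand. */
#include <stdint.h>

static inline uint32_t bounded_mult_demo(uint32_t (*rng)(void), uint32_t range) {
    uint64_t m = (uint64_t)rng() * range;
    uint32_t leftover = (uint32_t)m;                      // low 32 bits of the product
    if (leftover < range) {                               // only then can bias occur
        uint32_t threshold = (uint32_t)(-range) % range;  // = 2^32 mod range
        while (leftover < threshold) {                    // redraw until fair
            m = (uint64_t)rng() * range;
            leftover = (uint32_t)m;
        }
    }
    return (uint32_t)(m >> 32);                           // uniform in [0, range)
}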
facedist.c
/* Generated by Cython 0.24 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. #else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if 
PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef CYTHON_INLINE #if defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__facedist #define __PYX_HAVE_API__facedist #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "math.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define 
__PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "facedist.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong 
longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = 
((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void 
__Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) ((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex 
__Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static 
PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'cython' */ /* Module declarations from 'libc.math' */ /* Module declarations from 'facedist' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t = { "float64_t", NULL, sizeof(__pyx_t_5numpy_float64_t), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "facedist" int __pyx_module_is_main_facedist = 0; /* Implementation of 'facedist' */ static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_A[] = "A"; static const char __pyx_k_D[] = "D"; static const char __pyx_k_ii[] = "ii"; static const char __pyx_k_jj[] = "jj"; static const char __pyx_k_nn[] = "nn"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_rd[] = "rd"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mean[] = "mean"; static const char __pyx_k_ncol[] = "ncol"; static const char __pyx_k_nrow[] = "nrow"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_cdist[] = "cdist"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_metric[] = "metric"; static const char __pyx_k_float64[] = "float64"; static const char __pyx_k_facedist[] = "facedist"; static const char __pyx_k_euclidean[] = "euclidean"; static const char __pyx_k_mean_dist[] = "mean_dist"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_dok_matrix[] = "dok_matrix"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_scipy_sparse[] = "scipy.sparse"; static const char __pyx_k_scipy_spatial_distance[] = "scipy.spatial.distance"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_home_mlode_BA_Moritz_Implementa[] = "/home/mlode/BA-Moritz/Implementation/src/face_recognition/cython_sparse_arr/facedist.pyx"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_n_s_A; static PyObject *__pyx_n_s_D; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject 
*__pyx_n_s_ValueError; static PyObject *__pyx_n_s_cdist; static PyObject *__pyx_n_s_dok_matrix; static PyObject *__pyx_n_s_euclidean; static PyObject *__pyx_n_s_facedist; static PyObject *__pyx_n_s_float64; static PyObject *__pyx_kp_s_home_mlode_BA_Moritz_Implementa; static PyObject *__pyx_n_s_ii; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_jj; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_mean; static PyObject *__pyx_n_s_mean_dist; static PyObject *__pyx_n_s_metric; static PyObject *__pyx_n_s_ncol; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nn; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_nrow; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_rd; static PyObject *__pyx_n_s_scipy_sparse; static PyObject *__pyx_n_s_scipy_spatial_distance; static PyObject *__pyx_n_s_test; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_8facedist_mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_codeobj__8; /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. 
note that we */ /* Python wrapper */ static PyObject *__pyx_pw_8facedist_1mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A); /*proto*/ static PyMethodDef __pyx_mdef_8facedist_1mean_dist = {"mean_dist", (PyCFunction)__pyx_pw_8facedist_1mean_dist, METH_O, 0}; static PyObject *__pyx_pw_8facedist_1mean_dist(PyObject *__pyx_self, PyObject *__pyx_v_A) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("mean_dist (wrapper)", 0); if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_A), __pyx_ptype_5numpy_ndarray, 1, "A", 0))) __PYX_ERR(0, 21, __pyx_L1_error) __pyx_r = __pyx_pf_8facedist_mean_dist(__pyx_self, ((PyArrayObject *)__pyx_v_A)); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_8facedist_mean_dist(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_A) { Py_ssize_t __pyx_v_nrow; CYTHON_UNUSED Py_ssize_t __pyx_v_ncol; Py_ssize_t __pyx_v_ii; Py_ssize_t __pyx_v_jj; Py_ssize_t __pyx_v_nn; PyArrayObject *__pyx_v_D = 0; __pyx_t_5numpy_float64_t __pyx_v_rd; __Pyx_LocalBuf_ND __pyx_pybuffernd_D; __Pyx_Buffer __pyx_pybuffer_D; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; PyObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; Py_ssize_t __pyx_t_10; Py_ssize_t __pyx_t_11; Py_ssize_t __pyx_t_12; __pyx_t_5numpy_float64_t __pyx_t_13; Py_ssize_t __pyx_t_14; __Pyx_RefNannySetupContext("mean_dist", 0); __pyx_pybuffer_D.pybuffer.buf = NULL; __pyx_pybuffer_D.refcount = 0; __pyx_pybuffernd_D.data = NULL; __pyx_pybuffernd_D.rcbuffer = &__pyx_pybuffer_D; /* "facedist.pyx":27 * cdef: * # Py_ssize_t is just a special platform-specific type for indices * Py_ssize_t nrow = A.shape[0] # <<<<<<<<<<<<<< * Py_ssize_t ncol = A.shape[1] * Py_ssize_t ii, jj, nn */ __pyx_v_nrow = (__pyx_v_A->dimensions[0]); /* "facedist.pyx":28 * # Py_ssize_t is just a special platform-specific type for indices * Py_ssize_t nrow = A.shape[0] * Py_ssize_t ncol = A.shape[1] # <<<<<<<<<<<<<< * Py_ssize_t ii, jj, nn * */ __pyx_v_ncol = (__pyx_v_A->dimensions[1]); /* "facedist.pyx":33 * # this line is particularly expensive, since creating a numpy array * # involves unavoidable Python API overhead * np.ndarray[np.float64_t, ndim=1] D = np.zeros( (nrow*(nrow-1)/2), np.float64) # <<<<<<<<<<<<<< * * np.float64_t rd */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyInt_FromSsize_t(__Pyx_div_Py_ssize_t((__pyx_v_nrow * (__pyx_v_nrow - 1)), 2)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_float64); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = NULL; __pyx_t_6 = 0; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { 
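/* CPython fast path for the np.zeros(...) call: when the attribute lookup
   returned a bound method, unpack it into the underlying function plus its
   self object and pass self as an extra leading positional argument, which
   skips one level of bound-method indirection on the call. */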
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); __pyx_t_6 = 1; } } __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); if (__pyx_t_4) { __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_4); __pyx_t_4 = NULL; } __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_t_5); __pyx_t_2 = 0; __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 33, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_D.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float64_t, PyBUF_FORMAT| PyBUF_STRIDES| PyBUF_WRITABLE, 1, 0, __pyx_stack) == -1)) { __pyx_v_D = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_D.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 33, __pyx_L1_error) } else {__pyx_pybuffernd_D.diminfo[0].strides = __pyx_pybuffernd_D.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_D.diminfo[0].shape = __pyx_pybuffernd_D.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_D = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":42 * * #with nogil: * for ii in prange(nrow, nogil=True, schedule='static', num_threads=24): # <<<<<<<<<<<<<< * for jj in range(ii + 1, nrow): * */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS #endif /*try:*/ { __pyx_t_6 = __pyx_v_nrow; if (1 == 0) abort(); { Py_ssize_t __pyx_parallel_temp0 = 0xbad0bad0; Py_ssize_t __pyx_parallel_temp1 = 0xbad0bad0; Py_ssize_t __pyx_parallel_temp2 = 0xbad0bad0; __pyx_t_5numpy_float64_t __pyx_parallel_temp3 = __PYX_NAN(); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_10 = (__pyx_t_6 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_10 > 0) { #ifdef _OPENMP #pragma omp parallel num_threads(24) private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14) firstprivate(__pyx_t_1, __pyx_t_2, __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_7) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_ii) lastprivate(__pyx_v_ii) lastprivate(__pyx_v_jj) lastprivate(__pyx_v_nn) lastprivate(__pyx_v_rd) schedule(static) #endif /* _OPENMP */ for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){ if (__pyx_parallel_why < 2) { __pyx_v_ii = (Py_ssize_t)(0 + 1 * __pyx_t_9); /* Initialize private variables 
to invalid values */ __pyx_v_jj = ((Py_ssize_t)0xbad0bad0); __pyx_v_nn = ((Py_ssize_t)0xbad0bad0); __pyx_v_rd = ((__pyx_t_5numpy_float64_t)__PYX_NAN()); /* "facedist.pyx":43 * #with nogil: * for ii in prange(nrow, nogil=True, schedule='static', num_threads=24): * for jj in range(ii + 1, nrow): # <<<<<<<<<<<<<< * * with gil: */ __pyx_t_11 = __pyx_v_nrow; for (__pyx_t_12 = (__pyx_v_ii + 1); __pyx_t_12 < __pyx_t_11; __pyx_t_12+=1) { __pyx_v_jj = __pyx_t_12; /* "facedist.pyx":45 * for jj in range(ii + 1, nrow): * * with gil: # <<<<<<<<<<<<<< * rd = np.mean(cdist(A[ii], A[jj], metric='euclidean')) * nn = ii+jj*(jj-1)/2 */ { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif /*try:*/ { /* "facedist.pyx":46 * * with gil: * rd = np.mean(cdist(A[ii], A[jj], metric='euclidean')) # <<<<<<<<<<<<<< * nn = ii+jj*(jj-1)/2 * */ __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_mean); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_GetModuleGlobalName(__pyx_n_s_cdist); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_ii, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_2 = __Pyx_GetItemInt(((PyObject *)__pyx_v_A), __pyx_v_jj, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __pyx_t_5 = 0; __pyx_t_2 = 0; __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_metric, __pyx_n_s_euclidean) < 0) __PYX_ERR(0, 46, __pyx_L15_error) __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_4, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && unlikely(PyMethod_Check(__pyx_t_7))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_1); } else { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_4, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __pyx_t_13 = 
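/* GIL-holding body of the inner loop: the conversion below turns the
   np.mean(cdist(A[ii], A[jj], metric='euclidean')) result into the C double
   rd, and rd is then stored at the condensed upper-triangle index
   nn = ii + jj*(jj-1)/2. For 0 <= ii < jj < nrow this indexing is a
   bijection onto 0 .. nrow*(nrow-1)/2 - 1 (column jj owns the jj slots
   starting at jj*(jj-1)/2; e.g. nrow = 4 maps (0,1),(0,2),(1,2),(0,3),
   (1,3),(2,3) to 0..5), matching the length of D allocated above. */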
__pyx_PyFloat_AsDouble(__pyx_t_1); if (unlikely((__pyx_t_13 == (npy_float64)-1) && PyErr_Occurred())) __PYX_ERR(0, 46, __pyx_L15_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_rd = __pyx_t_13; /* "facedist.pyx":47 * with gil: * rd = np.mean(cdist(A[ii], A[jj], metric='euclidean')) * nn = ii+jj*(jj-1)/2 # <<<<<<<<<<<<<< * * D[nn] = rd */ __pyx_v_nn = (__pyx_v_ii + __Pyx_div_Py_ssize_t((__pyx_v_jj * (__pyx_v_jj - 1)), 2)); } /* "facedist.pyx":45 * for jj in range(ii + 1, nrow): * * with gil: # <<<<<<<<<<<<<< * rd = np.mean(cdist(A[ii], A[jj], metric='euclidean')) * nn = ii+jj*(jj-1)/2 */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L16; } __pyx_L15_error: { #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif goto __pyx_L8_error; } __pyx_L16:; } } /* "facedist.pyx":49 * nn = ii+jj*(jj-1)/2 * * D[nn] = rd # <<<<<<<<<<<<<< * * return D */ __pyx_t_14 = __pyx_v_nn; *__Pyx_BufPtrStrided1d(__pyx_t_5numpy_float64_t *, __pyx_pybuffernd_D.rcbuffer->pybuffer.buf, __pyx_t_14, __pyx_pybuffernd_D.diminfo[0].strides) = __pyx_v_rd; } goto __pyx_L18; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L17; __pyx_L17:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_ii; __pyx_parallel_temp1 = __pyx_v_jj; __pyx_parallel_temp2 = __pyx_v_nn; __pyx_parallel_temp3 = __pyx_v_rd; } __pyx_L18:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = NULL; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = NULL; __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = NULL; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = NULL; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = NULL; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = NULL; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, break or return in another thread. Prefer the error. 
*/ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_ii = __pyx_parallel_temp0; __pyx_v_jj = __pyx_parallel_temp1; __pyx_v_nn = __pyx_parallel_temp2; __pyx_v_rd = __pyx_parallel_temp3; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "facedist.pyx":42 * * #with nogil: * for ii in prange(nrow, nogil=True, schedule='static', num_threads=24): # <<<<<<<<<<<<<< * for jj in range(ii + 1, nrow): * */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "facedist.pyx":51 * D[nn] = rd * * return D # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_D)); __pyx_r = ((PyObject *)__pyx_v_D); goto __pyx_L0; /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. note that we */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_7); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("facedist.mean_dist", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_D.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_D); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. 
*/ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # <<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == 
pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * 
raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = 
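/* The readonly flag below is derived from the array's WRITEABLE flag;
   together with buf, ndim, shape, strides, suboffsets and itemsize filled
   in above, this completes the fixed part of the PEP 3118 Py_buffer
   header before the format string is resolved. */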
(!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = 
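/* Byte-order guard: a '>' (big-endian) dtype on a little-endian host, or
   '<' on a big-endian one, cannot be expressed with the plain single-char
   format codes emitted below, so non-native byte order is rejected with a
   ValueError before the type_num switch runs. */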
((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: __pyx_v_f = ((char *)"l"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == 
NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != 
sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":292 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":294 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":771 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); 
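/* Refnanny bookkeeping: __Pyx_XGIVEREF above records that ownership of
 * the result reference passes to the caller (the macro compiles to
 * nothing in builds without CYTHON_REFNANNY). The four MultiIterNew
 * helpers that follow repeat this same exit protocol. */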
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":774 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object 
PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":780 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; 
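/* At this point __pyx_r is either a new reference to a numpy broadcast
 * object or 0 with a Python exception set; each MultiIterNew helper
 * above follows the same contract. A hypothetical call site, shown for
 * illustration only (Cython never emits this directly), would be:
 *
 *     PyObject *bcast = __pyx_f_5numpy_PyArray_MultiIterNew2(a, b);
 *     if (!bcast) return NULL;   // propagate the pending Python error
 *     Py_DECREF(bcast);          // release once finished broadcasting
 */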
} /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * 
raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # 
<<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* 
"../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == 
NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # 
<<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 
1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; 
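/* 79 is ASCII 'O' (Python object pointer); as the quoted source notes,
 * integer literals stand in for the format characters here to avoid the
 * compiler warnings tracked by Cython ticket #99. */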
goto __pyx_L15; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
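 *
 * Editor's note, illustrative only (not part of the quoted .pxd source):
 * for a packed record dtype such as np.dtype([('a', np.int32), ('b',
 * np.float64)]) this routine appends "i" then "d", inserting "x" pad
 * bytes only where field offsets require them, so __getbuffer__ ends up
 * with a format string like "^id" (assuming np.int32 maps to NPY_INT).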
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
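 * (Editor's note: taking the new reference before the Py_XDECREF below
 * is what makes rebasing safe when base is already arr.base; decrefing
 * first could drop the last reference and leave baseptr dangling.)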
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "facedist", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_A, __pyx_k_A, sizeof(__pyx_k_A), 0, 0, 1, 1}, {&__pyx_n_s_D, __pyx_k_D, sizeof(__pyx_k_D), 0, 0, 1, 1}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, 
{&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_cdist, __pyx_k_cdist, sizeof(__pyx_k_cdist), 0, 0, 1, 1}, {&__pyx_n_s_dok_matrix, __pyx_k_dok_matrix, sizeof(__pyx_k_dok_matrix), 0, 0, 1, 1}, {&__pyx_n_s_euclidean, __pyx_k_euclidean, sizeof(__pyx_k_euclidean), 0, 0, 1, 1}, {&__pyx_n_s_facedist, __pyx_k_facedist, sizeof(__pyx_k_facedist), 0, 0, 1, 1}, {&__pyx_n_s_float64, __pyx_k_float64, sizeof(__pyx_k_float64), 0, 0, 1, 1}, {&__pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_k_home_mlode_BA_Moritz_Implementa, sizeof(__pyx_k_home_mlode_BA_Moritz_Implementa), 0, 0, 1, 0}, {&__pyx_n_s_ii, __pyx_k_ii, sizeof(__pyx_k_ii), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_jj, __pyx_k_jj, sizeof(__pyx_k_jj), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_mean, __pyx_k_mean, sizeof(__pyx_k_mean), 0, 0, 1, 1}, {&__pyx_n_s_mean_dist, __pyx_k_mean_dist, sizeof(__pyx_k_mean_dist), 0, 0, 1, 1}, {&__pyx_n_s_metric, __pyx_k_metric, sizeof(__pyx_k_metric), 0, 0, 1, 1}, {&__pyx_n_s_ncol, __pyx_k_ncol, sizeof(__pyx_k_ncol), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nn, __pyx_k_nn, sizeof(__pyx_k_nn), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_nrow, __pyx_k_nrow, sizeof(__pyx_k_nrow), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_rd, __pyx_k_rd, sizeof(__pyx_k_rd), 0, 0, 1, 1}, {&__pyx_n_s_scipy_sparse, __pyx_k_scipy_sparse, sizeof(__pyx_k_scipy_sparse), 0, 0, 1, 1}, {&__pyx_n_s_scipy_spatial_distance, __pyx_k_scipy_spatial_distance, sizeof(__pyx_k_scipy_spatial_distance), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error) __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & 
pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. 
note that we */ __pyx_tuple__7 = PyTuple_Pack(8, __pyx_n_s_A, __pyx_n_s_nrow, __pyx_n_s_ncol, __pyx_n_s_ii, __pyx_n_s_jj, __pyx_n_s_nn, __pyx_n_s_D, __pyx_n_s_rd); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); __pyx_codeobj__8 = (PyObject*)__Pyx_PyCode_New(1, 0, 8, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__7, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_home_mlode_BA_Moritz_Implementa, __pyx_n_s_mean_dist, 21, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__8)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initfacedist(void); /*proto*/ PyMODINIT_FUNC initfacedist(void) #else PyMODINIT_FUNC PyInit_facedist(void); /*proto*/ PyMODINIT_FUNC PyInit_facedist(void) #endif { PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_facedist(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("facedist", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_facedist) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "facedist")) { if (unlikely(PyDict_SetItemString(modules, "facedist", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "facedist.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":2 * import numpy as np * from scipy.spatial.distance import cdist # <<<<<<<<<<<<<< * from scipy.sparse import dok_matrix * cimport numpy as np */ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_n_s_cdist); __Pyx_GIVEREF(__pyx_n_s_cdist); PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_cdist); __pyx_t_2 = __Pyx_Import(__pyx_n_s_scipy_spatial_distance, __pyx_t_1, -1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_cdist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if 
(PyDict_SetItem(__pyx_d, __pyx_n_s_cdist, __pyx_t_1) < 0) __PYX_ERR(0, 2, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "facedist.pyx":3 * import numpy as np * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix # <<<<<<<<<<<<<< * cimport numpy as np * cimport cython */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_dok_matrix); __Pyx_GIVEREF(__pyx_n_s_dok_matrix); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_dok_matrix); __pyx_t_1 = __Pyx_Import(__pyx_n_s_scipy_sparse, __pyx_t_2, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_dok_matrix); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (PyDict_SetItem(__pyx_d, __pyx_n_s_dok_matrix, __pyx_t_2) < 0) __PYX_ERR(0, 3, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":21 * # indices. * @cython.wraparound(False) * def mean_dist(np.ndarray A): # <<<<<<<<<<<<<< * * # declare C types for as many of our variables as possible. note that we */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_8facedist_1mean_dist, NULL, __pyx_n_s_facedist); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_mean_dist, __pyx_t_1) < 0) __PYX_ERR(0, 21, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "facedist.pyx":1 * import numpy as np # <<<<<<<<<<<<<< * from scipy.spatial.distance import cdist * from scipy.sparse import dok_matrix */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "../../../../../intel/intelpython27/lib/python2.7/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init facedist", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init facedist"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, 
PyTypeObject *type)
{
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
}
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
    const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
        #if PY_MAJOR_VERSION == 2
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}

/* GetModuleGlobalName */
static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) {
    PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON
    result = PyDict_GetItem(__pyx_d, name);
    if (likely(result)) {
        Py_INCREF(result);
    } else {
#else
    result = PyObject_GetItem(__pyx_d, name);
    if (!result) {
        PyErr_Clear();
#endif
        result = __Pyx_GetBuiltinName(name);
    }
    return result;
}

/* None */
/* Python floor division for Py_ssize_t: C '/' truncates toward zero, so
   subtract 1 whenever the remainder is non-zero and has the opposite sign
   of the divisor (e.g. a = -7, b = 2: q = -3, r = -1, corrected to q = -4). */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
    Py_ssize_t q = a / b;
    Py_ssize_t r = a - q*b;
    q -= ((r != 0) & ((r ^ b) < 0));
    return q;
}

/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        return PyObject_Call(func, arg, kw);
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
    result = (*call)(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif

/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}

/* BufferFormatCheck */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    unsigned int n = 1;
    return *(unsigned char*)(&n) != 0;
}
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* '<= 9' so that multi-digit counts containing the digit 9
           (e.g. "19") are parsed completely */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1)
        PyErr_Format(PyExc_ValueError,
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
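/* Usage sketch (illustrative only, not generated code): the parser leaves
   *ts on the first non-digit, so a struct-module repeat count such as
   "16d" is consumed as

       const char *fmt = "16d";
       int count = __Pyx_BufFmt_ExpectNumber(&fmt);  // count == 16
       char typechar = *fmt;                         // 'd'

   ExpectNumber additionally sets a ValueError and returns -1 when the
   format string does not start with a digit. */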
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
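/* group codes used by the buffer dtype checks: 'H' char-like, 'I' signed
   integer, 'U' unsigned integer, 'R' real floating point, 'C' complex
   floating point, 'O' Python object, 'P' pointer; structs ('S') are
   walked via the field stack rather than classified here */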
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
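/* a NULL type terminates this struct's field array: pop one level and
   resume from the parent's current field */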
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; 
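/* 'x' is the struct-module pad byte: the pending type chunk is flushed
   and 'new_count' padding bytes are skipped without matching any dtype
   field (e.g. a format ending in "3x" carries three trailing pad bytes) */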
ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetItemInt */ static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_COMPILING_IN_CPYTHON if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? 
i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { 
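/* PyPy-only fixup: hand PyErr_NormalizeException a concrete Py_None,
   since the NULL value that the CPython path tolerates here is
   presumably not accepted by PyPy */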
Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more 
than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = 
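/* first insertion into an empty cache: allocate 64 entries up front;
   later growth happens further below in 64-entry steps via PyMem_Realloc */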
(__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? 
c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { 
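/* negative integer exponent: a**-n == (1/a)**n, so replace a with its
   complex reciprocal conj(a)/|a|^2 and negate the exponent before taking
   the small-power fast path below */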
float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, 
a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits 
= ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
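/* CPython stores long magnitudes in base 2**PyLong_SHIFT (15- or 30-bit
   digits, least significant first); three digits reassemble as
   ((d2 << SHIFT | d1) << SHIFT) | d0 once an unsigned long is known to be
   wide enough to hold them */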
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy 
*/ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | 
(long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != 
rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(b); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
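/* Hedged illustration (not part of the generated module above): the fast
 * paths in __Pyx_PyInt_As_int/__Pyx_PyInt_As_long rebuild a C integer from
 * CPython's little-endian array of base-2^PyLong_SHIFT digits. The
 * standalone sketch below mimics that accumulation with a made-up 15-bit
 * digit base so it compiles without Python headers; DEMO_SHIFT and
 * demo_digits_to_ulong are illustrative names only. */
#include <stdio.h>
#define DEMO_SHIFT 15 /* stand-in for PyLong_SHIFT (15 or 30 in CPython) */
static unsigned long demo_digits_to_ulong(const unsigned short *digits, int ndigits) {
  unsigned long v = 0;
  int i;
  for (i = ndigits - 1; i >= 0; i--) { /* accumulate from most-significant digit */
    v = (v << DEMO_SHIFT) | (unsigned long)digits[i];
  }
  return v;
}
int main(void) {
  unsigned short digits[2] = { 1, 2 }; /* least-significant digit first: 2*2^15 + 1 */
  printf("%lu\n", demo_digits_to_ulong(digits, 2)); /* prints 65537 */
  return 0;
}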
reduction-1.c
/* { dg-do compile } */ /* { dg-options "-fopenmp" } */ void bar (int a[10][10][10]); extern int f[][2]; /* { dg-error "has incomplete type" "" { target c++ } } */ extern int g[]; /* { dg-error "has incomplete type" "" { target c++ } } */ void foo (int a[10][10][10], int **b, int x) { int c[10][10][0]; int d[0]; char e[12]; #pragma omp parallel reduction(+: a[:4][:0][:7]) /* { dg-error "zero length array section" } */ bar (a); #pragma omp parallel reduction(+: b[:7][0:0][:0]) /* { dg-error "zero length array section" } */ bar (a); #pragma omp parallel reduction(+: c[:][:][0:]) /* { dg-error "zero length array section|for unknown bound array type length expression must be specified" } */ bar (a); #pragma omp parallel reduction(+: a[:4][:0][:x]) /* { dg-error "zero length array section" } */ bar (a); #pragma omp parallel reduction(+: b[:x][0:0][:0]) /* { dg-error "zero length array section" } */ bar (a); #pragma omp parallel reduction(+: c[:][:x][0:]) /* { dg-error "zero length array section|for unknown bound array type length expression must be specified" } */ bar (a); #pragma omp parallel reduction(+: d) /* { dg-error "is a zero size array" } */ bar (a); #pragma omp parallel reduction(+: a[0:4]) bar (a); #pragma omp parallel reduction(+: a[2:4]) bar (a); #pragma omp parallel reduction(+: e[2:4]) bar (a); #pragma omp parallel reduction(+: a[x:4]) bar (a); #pragma omp parallel reduction(+: e[x:4]) bar (a); #pragma omp parallel reduction(+: a[x:x]) bar (a); #pragma omp parallel reduction(+: e[x:x]) bar (a); #pragma omp parallel reduction(+: a[0.5:2]) /* { dg-error "low bound \[^\n\r]* of array section does not have integral type" } */ bar (a); #pragma omp parallel reduction(+: a[0:2.5]) /* { dg-error "length \[^\n\r]* of array section does not have integral type" } */ bar (a); #pragma omp parallel reduction(+: f[:][0:2]) /* { dg-error "for unknown bound array type length expression must be specified" } */ bar (a); #pragma omp parallel reduction(+: a[:][0:10]) /* { dg-error "for array function parameter length expression must be specified" } */ bar (a); #pragma omp parallel reduction(+: a[:10][0:12]) /* { dg-error "above array section size" } */ bar (a); #pragma omp parallel reduction(+: b[0:10][0:10]) /* { dg-error "array section is not contiguous" } */ bar (a); #pragma omp parallel reduction(+: a[0:2][0:9]) /* { dg-error "array section is not contiguous" } */ bar (a); #pragma omp parallel reduction(+: f) /* { dg-error "has an incomplete type|invalid use of array with unspecified bounds" } */ bar (a); #pragma omp parallel reduction(+: g) /* { dg-error "has an incomplete type|invalid use of array with unspecified bounds" } */ bar (a); }
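/* Hedged companion sketch (not part of the GCC testsuite file above): a
 * well-formed OpenMP array-section reduction for contrast with the error
 * cases; foo_ok is a hypothetical name. The section a[2:4] starts at index
 * 2, has length 4, and is contiguous, so it is accepted. */
void
foo_ok (int *a, int n)
{
  int i;
  #pragma omp parallel for reduction(+: a[2:4])
  for (i = 0; i < n; i++)
    a[2 + (i % 4)] += 1;
}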
bfsdfs.h
namespace TSnap { ///////////////////////////////////////////////// // BFS and DFS /// Returns a directed Breadth-First-Search tree rooted at StartNId. ##GetBfsTree1 template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn); /// Returns the BFS tree size (number of nodes) and depth (number of levels) by following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true) of node StartNId. template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSzX, int& TreeDepthX); /// Finds IDs of all nodes that are at distance Hop from node StartNId. ##GetSubTreeSz template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir=false); /// Returns the number of nodes at each hop distance from the starting node StartNId. ##GetNodesAtHops template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir=false); ///////////////////////////////////////////////// // Shortest paths /// Returns the length of the shortest path from node SrcNId to node DstNId. ##GetShortPath1 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); /// Returns the length of the shortest path from node SrcNId to all other nodes in the network. ##GetShortPath2 template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=TInt::Mx); ///////////////////////////////////////////////// // Diameter /// Returns the (approximation of the) Diameter (maximum shortest path length) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsFullDiam template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter (90-th percentile of the distribution of shortest path lengths) of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam1 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir=false); /// Returns the (approximation of the) Effective Diameter and the Diameter of a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam2 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiam3 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Returns the (approximation of the) Effective Diameter, the Diameter and the Average Shortest Path length in a graph (by performing BFS from NTestNodes random starting nodes). ##GetBfsEffDiamAll template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiamX, int& FullDiamX, double& AvgSPLX); /// Use the whole graph (all edges) to measure the shortest path lengths but only report the path lengths between nodes in the SubGraphNIdV. 
##GetBfsEffDiam4 template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiamX, int& FullDiamX); // TODO: Implement in the future //template <class PGraph> int GetRangeDist(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir=false); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir=false, const int& MaxDist=1000); //template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetShortPath(TIntH& NIdPrnH, TCcQueue<int>& NIdQ, const PGraph& Graph, const int& SrcNId, const TIntSet& TargetSet, const bool& IsDir, TIntV& PathNIdV); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId); //template <class PGraph> int GetMxShortDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, int& MxDistNId, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir=false); //template <class PGraph> int GetMxGreedyDist(const PGraph& Graph, const int& SrcNId, const bool& IsDir, TCcQueue<int>& NIdQ, TCcQueue<int>& DistQ, TIntSet& VisitedH); //template <class PGraph> PNGraph GetShortPathsSubGraph(const PGraph& Graph, const TIntV& SubGraphNIdV); //template <class PGraph> PGraph GetWccPathsSubGraph(const PGraph& Graph, const TIntV& NIdV); //template <class PGraph> void GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOutEdges, int& TreeSz, int& TreeDepth); } // namespace TSnap //#////////////////////////////////////////////// /// Breadth-First-Search class (the class name is kept as TBreathFS for API compatibility). /// The class is meant for executing many BFSs over a fixed graph. This means that the class can keep the hash tables and queues initialized between different calls of the DoBfs() function. template<class PGraph> class TBreathFS { public: PGraph Graph; TSnapQueue<int> Queue; TInt StartNId; TIntH NIdDistH; public: TBreathFS(const PGraph& GraphPt, const bool& InitBigQ=true) : Graph(GraphPt), Queue(InitBigQ?Graph->GetNodes():1024), NIdDistH(InitBigQ?Graph->GetNodes():1024) { } /// Sets the graph to be used by the BFS to GraphPt and resets the data structures. void SetGraph(const PGraph& GraphPt); /// Performs BFS from node id StartNode for at most MxDist steps by only following in-links (parameter FollowIn = true) and/or out-links (parameter FollowOut = true). int DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Same functionality as DoBfs with better performance. int DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId=-1, const int& MxDist=TInt::Mx); /// Returns the number of nodes visited/reached by the BFS. int GetNVisited() const { return NIdDistH.Len(); } /// Returns the IDs of the nodes visited/reached by the BFS. void GetVisitedNIdV(TIntV& NIdV) const { NIdDistH.GetKeyV(NIdV); } /// Returns the shortest path distance between SrcNId and DstNId. /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1.
int GetHops(const int& SrcNId, const int& DstNId) const; /// Returns a random shortest path from SrcNId to DstNId. /// Note you have to first call DoBfs(). SrcNId must be equal to StartNode, otherwise return value is -1. int GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const; /* Private variables and functions for DoBfsHybrid */ private: int Stage; // 0, 2: top down, 1: bottom up static const unsigned int alpha = 100; static const unsigned int beta = 20; /* Private functions */ bool TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); bool BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn); }; template<class PGraph> void TBreathFS<PGraph>::SetGraph(const PGraph& GraphPt) { Graph=GraphPt; const int N=GraphPt->GetNodes(); if (Queue.Reserved() < N) { Queue.Gen(N); } if (NIdDistH.GetReservedKeyIds() < N) { NIdDistH.Gen(N); } } template<class PGraph> int TBreathFS<PGraph>::DoBfs(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); // const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // IAssertR(StartNodeI.GetOutDeg() > 0, TStr::Fmt("No neighbors from start node %d.", StartNode)); NIdDistH.Clr(false); NIdDistH.AddDat(StartNId, 0); Queue.Clr(false); Queue.Push(StartNId); int v, MaxDist = 0; while (! Queue.Empty()) { const int NId = Queue.Top(); Queue.Pop(); const int Dist = NIdDistH.GetDat(NId); if (Dist == MxDist) { break; } // max distance limit reached const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { // out-links for (v = 0; v < NodeI.GetOutDeg(); v++) { // out-links const int DstNId = NodeI.GetOutNId(v); if (! NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist+1); MaxDist = TMath::Mx(MaxDist, Dist+1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } if (FollowIn) { // in-links for (v = 0; v < NodeI.GetInDeg(); v++) { const int DstNId = NodeI.GetInNId(v); if (! NIdDistH.IsKey(DstNId)) { NIdDistH.AddDat(DstNId, Dist+1); MaxDist = TMath::Mx(MaxDist, Dist+1); if (DstNId == TargetNId) { return MaxDist; } Queue.Push(DstNId); } } } } return MaxDist; } template<class PGraph> int TBreathFS<PGraph>::DoBfsHybrid(const int& StartNode, const bool& FollowOut, const bool& FollowIn, const int& TargetNId, const int& MxDist) { StartNId = StartNode; IAssert(Graph->IsNode(StartNId)); if (TargetNId == StartNode) return 0; const typename PGraph::TObj::TNodeI StartNodeI = Graph->GetNI(StartNode); // Initialize vector TIntV NIdDistV(Graph->GetMxNId() + 1); for (int i = 0; i < NIdDistV.Len(); i++) { NIdDistV.SetVal(i, -1); } TIntV *Frontier = new TIntV(Graph->GetNodes(), 0); TIntV *NextFrontier = new TIntV(Graph->GetNodes(), 0); NIdDistV.SetVal(StartNId, 0); Frontier->Add(StartNId); Stage = 0; int MaxDist = -1; const unsigned int TotalNodes = Graph->GetNodes(); unsigned int UnvisitedNodes = Graph->GetNodes(); while (!
Frontier->Empty()) { MaxDist += 1; NextFrontier->Clr(false); if (MaxDist == MxDist) { break; } // max distance limit reached UnvisitedNodes -= Frontier->Len(); if (Stage == 0 && UnvisitedNodes / Frontier->Len() < alpha) { Stage = 1; } else if (Stage == 1 && TotalNodes / Frontier->Len() > beta) { Stage = 2; } // Top down or bottom up depending on stage bool targetFound = false; if (Stage == 0 || Stage == 2) { targetFound = TopDownStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } else { targetFound = BottomUpStep(NIdDistV, Frontier, NextFrontier, MaxDist, TargetNId, FollowOut, FollowIn); } if (targetFound) { MaxDist = NIdDistV[TargetNId]; break; } // swap Frontier and NextFrontier TIntV *temp = Frontier; Frontier = NextFrontier; NextFrontier = temp; } delete Frontier; delete NextFrontier; // Transform vector to hash table NIdDistH.Clr(false); for (int NId = 0; NId < NIdDistV.Len(); NId++) { if (NIdDistV[NId] != -1) { NIdDistH.AddDat(NId, NIdDistV[NId]); } } return MaxDist; } template<class PGraph> bool TBreathFS<PGraph>::TopDownStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (TIntV::TIter it = Frontier->BegI(); it != Frontier->EndI(); ++it) { // loop over frontier const int NId = *it; const int Dist = NIdDistV[NId]; IAssert(Dist == MaxDist); // Must equal to MaxDist const typename PGraph::TObj::TNodeI NodeI = Graph->GetNI(NId); if (FollowOut) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int NeighborNId = NodeI.GetOutNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } if (FollowIn) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int NeighborNId = NodeI.GetInNId(v); if (NIdDistV[NeighborNId] == -1) { NIdDistV.SetVal(NeighborNId, Dist+1); if (NeighborNId == TargetNId) return true; NextFrontier->Add(NeighborNId); } } } } return false; } template<class PGraph> bool TBreathFS<PGraph>::BottomUpStep(TIntV &NIdDistV, TIntV *Frontier, TIntV *NextFrontier, int& MaxDist, const int& TargetNId, const bool& FollowOut, const bool& FollowIn) { for (typename PGraph::TObj::TNodeI NodeI = Graph->BegNI(); NodeI < Graph->EndNI(); NodeI++) { const int NId = NodeI.GetId(); if (NIdDistV[NId] == -1) { if (FollowOut) { for (int v = 0; v < NodeI.GetInDeg(); v++) { const int ParentNId = NodeI.GetInNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } if (FollowIn && NIdDistV[NId] == -1) { for (int v = 0; v < NodeI.GetOutDeg(); v++) { const int ParentNId = NodeI.GetOutNId(v); if (NIdDistV[ParentNId] == MaxDist) { NIdDistV[NId] = MaxDist + 1; if (NId == TargetNId) return true; NextFrontier->Add(NId); break; } } } } } return false; } template<class PGraph> int TBreathFS<PGraph>::GetHops(const int& SrcNId, const int& DstNId) const { TInt Dist; if (SrcNId!=StartNId) { return -1; } if (! NIdDistH.IsKeyGetDat(DstNId, Dist)) { return -1; } return Dist.Val; } template<class PGraph> int TBreathFS<PGraph>::GetRndPath(const int& SrcNId, const int& DstNId, TIntV& PathNIdV) const { PathNIdV.Clr(false); if (SrcNId!=StartNId || ! 
NIdDistH.IsKey(DstNId)) { return -1; } PathNIdV.Add(DstNId); TIntV CloserNIdV; int CurNId = DstNId; TInt CurDist, NextDist; while (CurNId != SrcNId) { typename PGraph::TObj::TNodeI NI = Graph->GetNI(CurNId); IAssert(NIdDistH.IsKeyGetDat(CurNId, CurDist)); CloserNIdV.Clr(false); for (int e = 0; e < NI.GetDeg(); e++) { const int Next = NI.GetNbrNId(e); if (NIdDistH.IsKeyGetDat(Next, NextDist)) { if (NextDist == CurDist-1) { CloserNIdV.Add(Next); } } } IAssert(! CloserNIdV.Empty()); CurNId = CloserNIdV[TInt::Rnd.GetUniDevInt(CloserNIdV.Len())]; PathNIdV.Add(CurNId); } PathNIdV.Reverse(); return PathNIdV.Len()-1; } ///////////////////////////////////////////////// // Implementation namespace TSnap { template <class PGraph> PNGraph GetBfsTree(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); PNGraph Tree = TNGraph::New(); BFS.NIdDistH.SortByDat(); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { const int NId = BFS.NIdDistH.GetKey(i); const int Dist = BFS.NIdDistH[i]; typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); if (!Tree->IsNode(NId)) { Tree->AddNode(NId); } if (FollowOut) { for (int e = 0; e < NI.GetInDeg(); e++) { const int Prev = NI.GetInNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } if (FollowIn) { for (int e = 0; e < NI.GetOutDeg(); e++) { const int Prev = NI.GetOutNId(e); if (Tree->IsNode(Prev) && BFS.NIdDistH.GetDat(Prev)==Dist-1) { Tree->AddEdge(Prev, NId); } } } } return Tree; } template <class PGraph> int GetSubTreeSz(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, int& TreeSz, int& TreeDepth) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, FollowOut, FollowIn, -1, TInt::Mx); TreeSz = BFS.NIdDistH.Len(); TreeDepth = 0; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { TreeDepth = TMath::Mx(TreeDepth, BFS.NIdDistH[i].Val); } return TreeSz; } template <class PGraph> int GetNodesAtHop(const PGraph& Graph, const int& StartNId, const int& Hop, TIntV& NIdV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, Hop); NIdV.Clr(false); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { if (BFS.NIdDistH[i] == Hop) { NIdV.Add(BFS.NIdDistH.GetKey(i)); } } return NIdV.Len(); } template <class PGraph> int GetNodesAtHops(const PGraph& Graph, const int& StartNId, TIntPrV& HopCntV, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(StartNId, true, !IsDir, -1, TInt::Mx); TIntH HopCntH; for (int i = 0; i < BFS.NIdDistH.Len(); i++) { HopCntH.AddDat(BFS.NIdDistH[i]) += 1; } HopCntH.GetKeyDatPrV(HopCntV); HopCntV.Sort(); return HopCntV.Len(); } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, TIntH& NIdToDistH, const bool& IsDir, const int& MaxDist) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! IsDir, -1, MaxDist); NIdToDistH.Clr(); NIdToDistH.Swap(BFS.NIdDistH); return NIdToDistH[NIdToDistH.Len()-1]; } template <class PGraph> int GetShortPath(const PGraph& Graph, const int& SrcNId, const int& DstNId, const bool& IsDir) { TBreathFS<PGraph> BFS(Graph); BFS.DoBfs(SrcNId, true, ! 
IsDir, DstNId, TInt::Mx); return BFS.GetHops(SrcNId, DstNId); } template <class PGraph> int GetBfsFullDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return FullDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir) { int FullDiam; double EffDiam; GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam); return EffDiam; } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam) { double AvgDiam; EffDiam = -1; FullDiam = -1; return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgDiam); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { EffDiam = -1; FullDiam = -1; AvgSPL = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shortest paths TIntV NodeIdV; Graph->GetNIdV(NodeIdV); NodeIdV.Shuffle(TInt::Rnd); for (int tries = 0; tries < TMath::Mn(NTestNodes, Graph->GetNodes()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, ! IsDir, -1, TInt::Mx); for (int i = 0; i < BFS.NIdDistH.Len(); i++) { DistToCntH.AddDat(BFS.NIdDistH[i]) += 1; } } TIntFltKdV DistNbrsPdfV; double SumPathL=0, PathCnt=0; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); SumPathL += DistToCntH.GetKey(i) * DistToCntH[i]; PathCnt += DistToCntH[i]; } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) AvgSPL = SumPathL/PathCnt; // average shortest path length return EffDiam; } template <class PGraph> double GetBfsEffDiamAll(const PGraph& Graph, const int& NTestNodes, const bool& IsDir, double& EffDiam, int& FullDiam, double& AvgSPL) { return GetBfsEffDiam(Graph, NTestNodes, IsDir, EffDiam, FullDiam, AvgSPL); } template <class PGraph> double GetBfsEffDiam(const PGraph& Graph, const int& NTestNodes, const TIntV& SubGraphNIdV, const bool& IsDir, double& EffDiam, int& FullDiam) { EffDiam = -1; FullDiam = -1; TIntFltH DistToCntH; TBreathFS<PGraph> BFS(Graph); // shortest paths TIntV NodeIdV(SubGraphNIdV); NodeIdV.Shuffle(TInt::Rnd); TInt Dist; for (int tries = 0; tries < TMath::Mn(NTestNodes, SubGraphNIdV.Len()); tries++) { const int NId = NodeIdV[tries]; BFS.DoBfs(NId, true, !
IsDir, -1, TInt::Mx); for (int i = 0; i < SubGraphNIdV.Len(); i++) { if (BFS.NIdDistH.IsKeyGetDat(SubGraphNIdV[i], Dist)) { DistToCntH.AddDat(Dist) += 1; } } } TIntFltKdV DistNbrsPdfV; for (int i = 0; i < DistToCntH.Len(); i++) { DistNbrsPdfV.Add(TIntFltKd(DistToCntH.GetKey(i), DistToCntH[i])); } DistNbrsPdfV.Sort(); EffDiam = TSnap::TSnapDetail::CalcEffDiamPdf(DistNbrsPdfV, 0.9); // effective diameter (90-th percentile) FullDiam = DistNbrsPdfV.Last().Key; // approximate full diameter (max shortest path length over the sampled nodes) return EffDiam; // effective diameter (90-th percentile) } template <class PGraph> int GetShortestDistances(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) { PSOut StdOut = TStdOut::New(); int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (ShortestDists[OutNId].Val == InfDepth) { ShortestDists[OutNId] = Depth; PNextV->Add(OutNId); } } } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return Depth-1; } #ifdef USE_OPENMP template <class PGraph> int GetShortestDistancesMP2(const PGraph& Graph, const int& StartNId, const bool& FollowOut, const bool& FollowIn, TIntV& ShortestDists) { int MxNId = Graph->GetMxNId(); int NonNodeDepth = 2147483647; // INT_MAX int InfDepth = 2147483646; // INT_MAX - 1 ShortestDists.Gen(MxNId); #pragma omp parallel for schedule(dynamic,10000) for (int NId = 0; NId < MxNId; NId++) { if (Graph->IsNode(NId)) { ShortestDists[NId] = InfDepth; } else { ShortestDists[NId] = NonNodeDepth; } } TIntV Vec1(MxNId, 0); // ensure enough capacity TIntV Vec2(MxNId, 0); // ensure enough capacity ShortestDists[StartNId] = 0; TIntV* PCurV = &Vec1; PCurV->Add(StartNId); TIntV* PNextV = &Vec2; int Depth = 0; // current depth while (!PCurV->Empty()) { Depth++; // increase depth #pragma omp parallel for schedule(dynamic,10000) for (int i = 0; i < PCurV->Len(); i++) { int NId = PCurV->GetVal(i); typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); for (int e = 0; e < NI.GetOutDeg(); e++) { const int OutNId = NI.GetOutNId(e); if (__sync_bool_compare_and_swap(&(ShortestDists[OutNId].Val), InfDepth, Depth)) { PNextV->AddMP(OutNId); } } } // #pragma omp parallel for schedule(dynamic,10000) // for (int NId = 0; NId < MxNId; NId++) { // if (ShortestDists[NId] == InfDepth) { // typename PGraph::TObj::TNodeI NI = Graph->GetNI(NId); // for (int e = 0; e < NI.GetInDeg(); e++) { // const int InNId = NI.GetInNId(e); // if (ShortestDists[InNId] < Depth) { // ShortestDists[NId] = Depth; // PNextV->AddMP(NId); // break; // } // } // } // } // swap pointer, no copying TIntV* Tmp = PCurV; PCurV = PNextV; PNextV = Tmp; // clear next PNextV->Reduce(0); // reduce length, does not initialize new array } return
Depth-1; } #endif // USE_OPENMP } // namespace TSnap
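//#//////////////////////////////////////////////
/// Hedged usage sketch (not part of the original header): one BFS over a
/// small directed path graph using the TBreathFS class above. Assumes the
/// standard SNAP environment (Snap.h) where PNGraph/TNGraph are visible;
/// BfsDfsUsageExample is an illustrative name only.
#if 0 // illustration only; compile inside a program that links against SNAP
static void BfsDfsUsageExample() {
  PNGraph G = TNGraph::New();
  for (int i = 0; i < 4; i++) { G->AddNode(i); }       // nodes 0..3
  G->AddEdge(0, 1); G->AddEdge(1, 2); G->AddEdge(2, 3); // directed path
  TBreathFS<PNGraph> BFS(G);
  BFS.DoBfs(/*StartNode=*/0, /*FollowOut=*/true, /*FollowIn=*/false);
  printf("visited %d nodes, hops 0->3: %d\n",
         BFS.GetNVisited(), BFS.GetHops(0, 3));         // expect 4 and 3
}
#endif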
boomerAMG.c
#include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <mpi.h> #include "omp.h" #include "boomerAMG.h" static double boomerAMGParam[BOOMERAMG_NPARAM]; #ifdef HYPRE #include "_hypre_utilities.h" #include "HYPRE_parcsr_ls.h" #include "_hypre_parcsr_ls.h" #include "HYPRE.h" typedef struct hypre_data { MPI_Comm comm; HYPRE_Solver solver; HYPRE_IJMatrix A; HYPRE_IJVector b; HYPRE_IJVector x; HYPRE_BigInt ilower; HYPRE_BigInt *ii; HYPRE_Real *bb; HYPRE_Real *xx; int nRows; int Nthreads; } hypre_data; static hypre_data *data; int boomerAMGSetup(int nrows, int nz, const long long int *Ai, const long long int *Aj, const double *Av, const int null_space, const MPI_Comm ce, int Nthreads, int deviceID, const int useFP32, const double *param) { data = (hypre_data*) malloc(sizeof(struct hypre_data)); data->Nthreads = Nthreads; MPI_Comm comm; MPI_Comm_dup(ce, &comm); data->comm = comm; int rank; MPI_Comm_rank(comm,&rank); if(sizeof(HYPRE_Real) != ((useFP32) ? sizeof(float) : sizeof(double))) { if(rank == 0) printf("HYPRE has not been built to support FP32.\n"); MPI_Abort(ce, 1); } long long rowStart = nrows; MPI_Scan(MPI_IN_PLACE, &rowStart, 1, MPI_LONG_LONG, MPI_SUM, ce); rowStart -= nrows; data->nRows = nrows; HYPRE_BigInt ilower = (HYPRE_BigInt) rowStart; data->ilower = ilower; HYPRE_BigInt iupper = ilower + (HYPRE_BigInt) nrows - 1; HYPRE_IJMatrixCreate(comm,ilower,iupper,ilower,iupper,&data->A); HYPRE_IJMatrix A_ij = data->A; HYPRE_IJMatrixSetObjectType(A_ij,HYPRE_PARCSR); HYPRE_IJMatrixInitialize(A_ij); int i; for(i=0; i<nz; i++) { HYPRE_BigInt mati = (HYPRE_BigInt)(Ai[i]); HYPRE_BigInt matj = (HYPRE_BigInt)(Aj[i]); HYPRE_Real matv = (HYPRE_Real) Av[i]; HYPRE_Int ncols = 1; // number of columns per row HYPRE_IJMatrixSetValues(A_ij, 1, &ncols, &mati, &matj, &matv); } HYPRE_IJMatrixAssemble(A_ij); //HYPRE_IJMatrixPrint(A_ij, "matrix.dat"); // Create AMG solver HYPRE_BoomerAMGCreate(&data->solver); HYPRE_Solver solver = data->solver; int uparam = (int) param[0]; // Set AMG parameters if (uparam) { int i; for (i = 0; i < BOOMERAMG_NPARAM; i++) boomerAMGParam[i] = param[i+1]; } else { boomerAMGParam[0] = 10; /* coarsening */ boomerAMGParam[1] = 6; /* interpolation */ boomerAMGParam[2] = 1; /* number of cycles */ boomerAMGParam[3] = 6; /* smoother for crs level */ boomerAMGParam[4] = 3; /* sweeps */ boomerAMGParam[5] = -1; /* smoother */ boomerAMGParam[6] = 1; /* sweeps */ boomerAMGParam[7] = 0.25; /* threshold */ boomerAMGParam[8] = 0.0; /* non galerkin tolerance */ } HYPRE_BoomerAMGSetCoarsenType(solver,boomerAMGParam[0]); HYPRE_BoomerAMGSetInterpType(solver,boomerAMGParam[1]); //HYPRE_BoomerAMGSetKeepTranspose(solver, 1); //HYPRE_BoomerAMGSetChebyFraction(solver, 0.2); if (boomerAMGParam[5] > 0) { HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[5], 1); HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[5], 2); } HYPRE_BoomerAMGSetCycleRelaxType(solver, 9, 3); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[6], 1); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[6], 2); HYPRE_BoomerAMGSetCycleNumSweeps(solver, 1, 3); if (null_space) { HYPRE_BoomerAMGSetMinCoarseSize(solver, 2); HYPRE_BoomerAMGSetCycleRelaxType(solver, boomerAMGParam[3], 3); HYPRE_BoomerAMGSetCycleNumSweeps(solver, boomerAMGParam[4], 3); } HYPRE_BoomerAMGSetStrongThreshold(solver,boomerAMGParam[7]); if (boomerAMGParam[8] > 1e-3) { HYPRE_BoomerAMGSetNonGalerkinTol(solver,boomerAMGParam[8]); HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.0 , 0); 
HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.01, 1); HYPRE_BoomerAMGSetLevelNonGalerkinTol(solver,0.05, 2); } HYPRE_BoomerAMGSetAggNumLevels(solver, boomerAMGParam[9]); HYPRE_BoomerAMGSetMaxIter(solver,boomerAMGParam[2]); // number of V-cycles HYPRE_BoomerAMGSetTol(solver,0); HYPRE_BoomerAMGSetPrintLevel(solver,1); // Create and initialize rhs and solution vectors HYPRE_IJVectorCreate(comm,ilower,iupper,&data->b); HYPRE_IJVector b = data->b; HYPRE_IJVectorSetObjectType(b,HYPRE_PARCSR); HYPRE_IJVectorInitialize(b); HYPRE_IJVectorAssemble(b); HYPRE_IJVectorCreate(comm,ilower,iupper,&data->x); HYPRE_IJVector x = data->x; HYPRE_IJVectorSetObjectType(x,HYPRE_PARCSR); HYPRE_IJVectorInitialize(x); HYPRE_IJVectorAssemble(x); // Perform AMG setup HYPRE_ParVector par_b; HYPRE_ParVector par_x; HYPRE_IJVectorGetObject(b,(void**) &par_b); HYPRE_IJVectorGetObject(x,(void**) &par_x); HYPRE_ParCSRMatrix par_A; HYPRE_IJMatrixGetObject(data->A,(void**) &par_A); int _Nthreads = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid==0) _Nthreads = omp_get_num_threads(); } omp_set_num_threads(data->Nthreads); HYPRE_BoomerAMGSetup(solver,par_A,par_b,par_x); omp_set_num_threads(_Nthreads); data->ii = (HYPRE_BigInt*) malloc(data->nRows*sizeof(HYPRE_BigInt)); data->bb = (HYPRE_Real*) malloc(data->nRows*sizeof(HYPRE_Real)); data->xx = (HYPRE_Real*) malloc(data->nRows*sizeof(HYPRE_Real)); for(i=0;i<data->nRows;++i) data->ii[i] = ilower + (HYPRE_BigInt)i; return 0; } int boomerAMGSolve(void *x, void *b) { int err; HYPRE_Real *xx = (HYPRE_Real*) x; const HYPRE_Real *bb = (const HYPRE_Real*) b; HYPRE_ParVector par_x; HYPRE_ParVector par_b; HYPRE_ParCSRMatrix par_A; HYPRE_IJVectorSetValues(data->b,data->nRows,data->ii,bb); HYPRE_IJVectorAssemble(data->b); HYPRE_IJVectorGetObject(data->b,(void**) &par_b); HYPRE_IJVectorAssemble(data->x); HYPRE_IJVectorGetObject(data->x,(void **) &par_x); HYPRE_IJMatrixGetObject(data->A,(void**) &par_A); int _Nthreads = 1; #pragma omp parallel { int tid = omp_get_thread_num(); if(tid==0) _Nthreads = omp_get_num_threads(); } omp_set_num_threads(data->Nthreads); err = HYPRE_BoomerAMGSolve(data->solver,par_A,par_b,par_x); omp_set_num_threads(_Nthreads); /* restore thread count before any early return */ if(err > 0) { int rank; MPI_Comm_rank(data->comm,&rank); if(rank == 0) printf("HYPRE_BoomerAMGSolve failed!\n"); return 1; } HYPRE_IJVectorGetValues(data->x,data->nRows,data->ii,xx); return 0; } void boomerAMGFree() { HYPRE_BoomerAMGDestroy(data->solver); HYPRE_IJMatrixDestroy(data->A); HYPRE_IJVectorDestroy(data->x); HYPRE_IJVectorDestroy(data->b); free(data); } // Just to fix a hypre linking error void hypre_blas_xerbla() { } void hypre_blas_lsame() { } #else int boomerAMGSetup(int nrows, int nz, const long long int *Ai, const long long int *Aj, const double *Av, const int null_space, const MPI_Comm ce, int Nthreads, int deviceID, const int useFP32, const double *param) { int rank; MPI_Comm_rank(ce,&rank); if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n"); return 1; } int boomerAMGSolve(void *x, void *b) { int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank); /* no communicator argument in this signature */ if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n"); return 1; } void boomerAMGFree() { int rank; MPI_Comm_rank(MPI_COMM_WORLD,&rank); if(rank == 0) printf("ERROR: Recompile with HYPRE support!\n"); MPI_Abort(MPI_COMM_WORLD, 1); } #endif
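/* Hedged usage sketch (not part of boomerAMG.c): driving the wrapper above
 * for a 2x2 identity system on a single rank. Assumes a build with HYPRE
 * support and double precision (useFP32 = 0); example_main is a
 * hypothetical driver name. With param[0] = 0 the built-in BoomerAMG
 * defaults above are used. */
#if 0
#include <mpi.h>
#include "boomerAMG.h"
int example_main(int argc, char **argv) {
  long long Ai[2] = {0, 1}, Aj[2] = {0, 1};   /* COO indices of A = I */
  double Av[2] = {1.0, 1.0};
  double param[1] = {0};                      /* param[0] = 0: use defaults */
  double b[2] = {3.0, 4.0}, x[2] = {0.0, 0.0};
  MPI_Init(&argc, &argv);
  boomerAMGSetup(2, 2, Ai, Aj, Av, /*null_space=*/0, MPI_COMM_WORLD,
                 /*Nthreads=*/1, /*deviceID=*/0, /*useFP32=*/0, param);
  boomerAMGSolve(x, b);                       /* one V-cycle: x ~= b for A = I */
  boomerAMGFree();
  MPI_Finalize();
  return 0;
}
#endif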
mixed_tentusscher_myo_epi_2004_S3.c
// Scenario 3 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt + Rd) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S3.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.7596599603487,0.00123838857632763,0.784369818846026,0.784223148947282,0.000169972136689011,0.487082365294413,0.00290049182352458,0.999998410215409,1.87279005544269e-08,1.84341746908718e-05,0.999781004659642,1.00771223118124,0.999999564103621,3.04673432492567e-05,0.993358298469861,10.1763606222150,139.168522102236}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.6970262149558,2.32527331724419e-05,0.000121747898718481,0.000276971880166082,0.210038991991875,0.120908114803453,0.200498466936257,5.12988959137240,0.0151231713364490,1.26415205898593,1083.02600285230,0.000542147164379904,0.160470068504854,0.0146070055973378,0.00183114105726186,1.00487709573505e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); 
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); 
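/* Note: the bc/cc expressions below implement the analytic rapid-buffering
 * update. Writing the per-step total calcium as T = Cai + CaBuf + dCai and
 * requiring the new Cai to satisfy Cai + Bufc*Cai/(Cai + Kbufc) = T gives
 * the quadratic Cai^2 + bc*Cai - cc = 0 with bc = Bufc + Kbufc - T and
 * cc = Kbufc*T, whose positive root is Cai = (sqrt(bc*bc + 4*cc) - bc)/2.
 * The bjsr/cjsr update for CaSR just above is the same construction with
 * Bufsr/Kbufsr. */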
bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
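/* A minimal usage sketch, not part of the original model code: it paces one
 * epicardial cell through solve_model_ode_cpu_epi() with a fixed
 * forward-Euler step. Assumptions: `real` is double, NEQ == 17 (the sv[]
 * layout read by RHS_cpu_epi above), <stdio.h> is available, and the initial
 * state and the 1 ms / -38 stimulus are common illustrative choices, not
 * values taken from this file. Guarded behind an arbitrary macro so it is
 * not compiled by default. */
#ifdef TT2004_EPI_USAGE_SKETCH
static void example_pace_epi_cell(void) {
    real sv[17] = {
        -86.2,  /* 0: V (mV) */
        0.0,    /* 1: m   */ 0.75, /* 2: h   */ 0.75, /* 3: j  */
        0.0,    /* 4: Xr1 */ 1.0,  /* 5: Xr2 */ 0.0,  /* 6: Xs */
        1.0,    /* 7: s   */ 0.0,  /* 8: r   */ 0.0,  /* 9: d  */
        1.0,    /* 10: f  */ 1.0,  /* 11: fCa */ 1.0, /* 12: g */
        2.0e-4, /* 13: Cai (mM) */ 0.2,   /* 14: CaSR (mM) */
        11.6,   /* 15: Nai (mM) */ 138.3  /* 16: Ki (mM)   */
    };
    const real dt = 0.02; /* ms */
    for (int step = 0; step * dt < 500.0; step++) {
        /* stim_current is added to sItot and dV = -sItot*dt, so a negative
         * stimulus depolarizes; apply it for the first 1 ms only. */
        real stim = (step * dt < 1.0) ? -38.0 : 0.0;
        solve_model_ode_cpu_epi(dt, sv, stim);
    }
    printf("V after 500 ms: %g mV\n", (double) sv[0]);
}
#endif /* TT2004_EPI_USAGE_SKETCH */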
StmtOpenMP.h
//===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// /// \file /// This file defines OpenMP AST classes for executable directives and /// clauses. /// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMTOPENMP_H #define LLVM_CLANG_AST_STMTOPENMP_H #include "clang/AST/Expr.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" namespace clang { //===----------------------------------------------------------------------===// // AST classes for directives. //===----------------------------------------------------------------------===// /// This is a basic class for representing a single OpenMP executable /// directive. /// class OMPExecutableDirective : public Stmt { friend class ASTStmtReader; /// Kind of the directive. OpenMPDirectiveKind Kind; /// Starting location of the directive (directive keyword). SourceLocation StartLoc; /// Ending location of the directive. SourceLocation EndLoc; /// Number of clauses. const unsigned NumClauses; /// Number of child expressions/stmts. const unsigned NumChildren; /// Offset from this to the start of clauses. /// There are NumClauses pointers to clauses, they are followed by /// NumChildren pointers to child stmts/exprs (if the directive type /// requires an associated stmt, then it has to be the first of them). const unsigned ClausesOffset; /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>( reinterpret_cast<char *>(this) + ClausesOffset); return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses); } protected: /// Build instance of directive of class \a K. /// /// \param SC Statement class. /// \param K Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// template <typename T> OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses, unsigned NumChildren) : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)), EndLoc(std::move(EndLoc)), NumClauses(NumClauses), NumChildren(NumChildren), ClausesOffset(llvm::alignTo(sizeof(T), alignof(OMPClause *))) {} /// Sets the list of clauses for the directive. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Set the associated statement for the directive. /// /// \param S Associated statement. /// void setAssociatedStmt(Stmt *S) { assert(hasAssociatedStmt() && "no associated statement."); *child_begin() = S; } public: /// Iterates over expressions/statements used in the construct.
class used_clauses_child_iterator : public llvm::iterator_adaptor_base< used_clauses_child_iterator, ArrayRef<OMPClause *>::iterator, std::forward_iterator_tag, Stmt *, ptrdiff_t, Stmt *, Stmt *> { ArrayRef<OMPClause *>::iterator End; OMPClause::child_iterator ChildI, ChildEnd; void MoveToNext() { if (ChildI != ChildEnd) return; while (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); if (ChildI != ChildEnd) return; } } } public: explicit used_clauses_child_iterator(ArrayRef<OMPClause *> Clauses) : used_clauses_child_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); MoveToNext(); } } Stmt *operator*() const { return *ChildI; } Stmt *operator->() const { return **this; } used_clauses_child_iterator &operator++() { ++ChildI; if (ChildI != ChildEnd) return *this; if (this->I != End) { ++this->I; if (this->I != End) { ChildI = (*this->I)->used_children().begin(); ChildEnd = (*this->I)->used_children().end(); } } MoveToNext(); return *this; } }; static llvm::iterator_range<used_clauses_child_iterator> used_clauses_children(ArrayRef<OMPClause *> Clauses) { return {used_clauses_child_iterator(Clauses), used_clauses_child_iterator(llvm::makeArrayRef(Clauses.end(), 0))}; } /// Iterates over a filtered subrange of clauses applied to a /// directive. /// /// This iterator visits only clauses of type SpecificClause. template <typename SpecificClause> class specific_clause_iterator : public llvm::iterator_adaptor_base< specific_clause_iterator<SpecificClause>, ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag, const SpecificClause *, ptrdiff_t, const SpecificClause *, const SpecificClause *> { ArrayRef<OMPClause *>::const_iterator End; void SkipToNextClause() { while (this->I != End && !isa<SpecificClause>(*this->I)) ++this->I; } public: explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses) : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()), End(Clauses.end()) { SkipToNextClause(); } const SpecificClause *operator*() const { return cast<SpecificClause>(*this->I); } const SpecificClause *operator->() const { return **this; } specific_clause_iterator &operator++() { ++this->I; SkipToNextClause(); return *this; } }; template <typename SpecificClause> static llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind(ArrayRef<OMPClause *> Clauses) { return {specific_clause_iterator<SpecificClause>(Clauses), specific_clause_iterator<SpecificClause>( llvm::makeArrayRef(Clauses.end(), 0))}; } template <typename SpecificClause> llvm::iterator_range<specific_clause_iterator<SpecificClause>> getClausesOfKind() const { return getClausesOfKind<SpecificClause>(clauses()); } /// Gets a single clause of the specified kind associated with the /// current directive iff there is only one clause of this kind (an assertion /// fires if more than one clause of this kind is associated with the /// directive). Returns nullptr if no clause of this kind is associated with /// the directive.
template <typename SpecificClause> const SpecificClause *getSingleClause() const { auto Clauses = getClausesOfKind<SpecificClause>(); if (Clauses.begin() != Clauses.end()) { assert(std::next(Clauses.begin()) == Clauses.end() && "There are at least 2 clauses of the specified kind"); return *Clauses.begin(); } return nullptr; } /// Returns true if the current directive has one or more clauses of a /// specific kind. template <typename SpecificClause> bool hasClausesOfKind() const { auto Clauses = getClausesOfKind<SpecificClause>(); return Clauses.begin() != Clauses.end(); } /// Returns starting location of directive kind. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns ending location of directive. SourceLocation getEndLoc() const { return EndLoc; } /// Set starting location of directive kind. /// /// \param Loc New starting location of directive. /// void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Set ending location of directive. /// /// \param Loc New ending location of directive. /// void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Get number of clauses. unsigned getNumClauses() const { return NumClauses; } /// Returns specified clause. /// /// \param i Number of clause. /// OMPClause *getClause(unsigned i) const { return clauses()[i]; } /// Returns true if directive has associated statement. bool hasAssociatedStmt() const { return NumChildren > 0; } /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } Stmt *getAssociatedStmt() { assert(hasAssociatedStmt() && "no associated statement."); return *child_begin(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. // // \param RegionKind Component region kind. const CapturedStmt *getCapturedStmt(OpenMPDirectiveKind RegionKind) const { SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(std::any_of( CaptureRegions.begin(), CaptureRegions.end(), [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
CapturedStmt *getInnermostCapturedStmt() { assert(hasAssociatedStmt() && getAssociatedStmt() && "Must have associated statement."); SmallVector<OpenMPDirectiveKind, 4> CaptureRegions; getOpenMPCaptureRegions(CaptureRegions, getDirectiveKind()); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt *getInnermostCapturedStmt() const { return const_cast<OMPExecutableDirective *>(this) ->getInnermostCapturedStmt(); } OpenMPDirectiveKind getDirectiveKind() const { return Kind; } static bool classof(const Stmt *S) { return S->getStmtClass() >= firstOMPExecutableDirectiveConstant && S->getStmtClass() <= lastOMPExecutableDirectiveConstant; } child_range children() { if (!hasAssociatedStmt()) return child_range(child_iterator(), child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end()); /// Do not mark all the special expression/statements as children, except /// for the associated statement. return child_range(ChildStorage, ChildStorage + 1); } const_child_range children() const { if (!hasAssociatedStmt()) return const_child_range(const_child_iterator(), const_child_iterator()); Stmt **ChildStorage = reinterpret_cast<Stmt **>( const_cast<OMPExecutableDirective *>(this)->getClauses().end()); return const_child_range(ChildStorage, ChildStorage + 1); } ArrayRef<OMPClause *> clauses() { return getClauses(); } ArrayRef<OMPClause *> clauses() const { return const_cast<OMPExecutableDirective *>(this)->getClauses(); } /// Returns whether or not this is a Standalone directive. /// /// Stand-alone directives are executable directives /// that have no associated user code. bool isStandaloneDirective() const; /// Returns the AST node representing OpenMP structured-block of this /// OpenMP executable directive, /// Prerequisite: Executable Directive must not be Standalone directive. const Stmt *getStructuredBlock() const; Stmt *getStructuredBlock() { return const_cast<Stmt *>( const_cast<const OMPExecutableDirective *>(this)->getStructuredBlock()); } }; /// This represents '#pragma omp parallel' directive. /// /// \code /// #pragma omp parallel private(a,b) reduction(+: c,d) /// \endcode /// In this example directive '#pragma omp parallel' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending Location of the directive. /// OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. 
/// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement associated with the directive. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelDirectiveClass; } }; /// This is a common base class for loop directives ('omp simd', 'omp /// for', 'omp for simd' etc.). It is responsible for the loop code generation. /// class OMPLoopDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Number of collapsed loops as specified by 'collapse' clause. unsigned CollapsedNum; /// Offsets to the stored exprs. /// This enumeration contains offsets to all the pointers to children /// expressions stored in OMPLoopDirective. /// The first 9 children are necessary for all the loop directives, /// the next 8 are specific to the worksharing ones, and the next 13 are /// used for combined constructs containing two pragmas associated to loops. /// After the fixed children, eight arrays of length CollapsedNum are /// allocated: loop counters, their private copies, inits, updates, finals, /// dependent counters, dependent inits, and finals conditions. /// PrevLowerBound and PrevUpperBound are used to communicate blocking /// information in composite constructs which require loop blocking /// DistInc is used to generate the increment expression for the distribute /// loop when combined with a further nested loop /// PrevEnsureUpperBound is used as the EnsureUpperBound expression for the /// for loop when combined with a previous distribute loop in the same pragma /// (e.g. 'distribute parallel for') /// enum { AssociatedStmtOffset = 0, IterationVariableOffset = 1, LastIterationOffset = 2, CalcLastIterationOffset = 3, PreConditionOffset = 4, CondOffset = 5, InitOffset = 6, IncOffset = 7, PreInitsOffset = 8, // The '...End' enumerators do not correspond to child expressions - they // specify the offset to the end (and start of the following counters/ // updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays). DefaultEnd = 9, // The following 8 exprs are used by worksharing and distribute loops only. IsLastIterVariableOffset = 9, LowerBoundVariableOffset = 10, UpperBoundVariableOffset = 11, StrideVariableOffset = 12, EnsureUpperBoundOffset = 13, NextLowerBoundOffset = 14, NextUpperBoundOffset = 15, NumIterationsOffset = 16, // Offset to the end for worksharing loop directives.
WorksharingEnd = 17, PrevLowerBoundVariableOffset = 17, PrevUpperBoundVariableOffset = 18, DistIncOffset = 19, PrevEnsureUpperBoundOffset = 20, CombinedLowerBoundVariableOffset = 21, CombinedUpperBoundVariableOffset = 22, CombinedEnsureUpperBoundOffset = 23, CombinedInitOffset = 24, CombinedConditionOffset = 25, CombinedNextLowerBoundOffset = 26, CombinedNextUpperBoundOffset = 27, CombinedDistConditionOffset = 28, CombinedParForInDistConditionOffset = 29, // Offset to the end (and start of the following // counters/updates/finals/dependent_counters/dependent_inits/finals_conditions // arrays) for combined distribute loop directives. CombinedDistributeEnd = 30, }; /// Get the counters storage. MutableArrayRef<Expr *> getCounters() { Expr **Storage = reinterpret_cast<Expr **>( &(*(std::next(child_begin(), getArraysOffset(getDirectiveKind()))))); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the private counters storage. MutableArrayRef<Expr *> getPrivateCounters() { Expr **Storage = reinterpret_cast<Expr **>(&*std::next( child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the inits storage. MutableArrayRef<Expr *> getInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the updates storage. MutableArrayRef<Expr *> getUpdates() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the final counter updates storage. MutableArrayRef<Expr *> getFinals() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent counters storage. MutableArrayRef<Expr *> getDependentCounters() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 5 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the dependent inits storage. MutableArrayRef<Expr *> getDependentInits() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 6 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } /// Get the finals conditions storage. MutableArrayRef<Expr *> getFinalsConditions() { Expr **Storage = reinterpret_cast<Expr **>( &*std::next(child_begin(), getArraysOffset(getDirectiveKind()) + 7 * CollapsedNum)); return MutableArrayRef<Expr *>(Storage, CollapsedNum); } protected: /// Build instance of loop directive of class \a Kind. /// /// \param SC Statement class. /// \param Kind Kind of OpenMP directive. /// \param StartLoc Starting location of the directive (directive keyword). /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed loops from 'collapse' clause. /// \param NumClauses Number of clauses. /// \param NumSpecialChildren Number of additional directive-specific stmts.
/// template <typename T> OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses, unsigned NumSpecialChildren = 0) : OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses, numLoopChildren(CollapsedNum, Kind) + NumSpecialChildren), CollapsedNum(CollapsedNum) {} /// Offset to the start of children expression arrays. static unsigned getArraysOffset(OpenMPDirectiveKind Kind) { if (isOpenMPLoopBoundSharingDirective(Kind)) return CombinedDistributeEnd; if (isOpenMPWorksharingDirective(Kind) || isOpenMPTaskLoopDirective(Kind) || isOpenMPDistributeDirective(Kind)) return WorksharingEnd; return DefaultEnd; } /// Children number. static unsigned numLoopChildren(unsigned CollapsedNum, OpenMPDirectiveKind Kind) { return getArraysOffset(Kind) + 8 * CollapsedNum; // Counters, PrivateCounters, Inits, // Updates, Finals, DependentCounters, // DependentInits, FinalsConditions. } void setIterationVariable(Expr *IV) { *std::next(child_begin(), IterationVariableOffset) = IV; } void setLastIteration(Expr *LI) { *std::next(child_begin(), LastIterationOffset) = LI; } void setCalcLastIteration(Expr *CLI) { *std::next(child_begin(), CalcLastIterationOffset) = CLI; } void setPreCond(Expr *PC) { *std::next(child_begin(), PreConditionOffset) = PC; } void setCond(Expr *Cond) { *std::next(child_begin(), CondOffset) = Cond; } void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; } void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; } void setPreInits(Stmt *PreInits) { *std::next(child_begin(), PreInitsOffset) = PreInits; } void setIsLastIterVariable(Expr *IL) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), IsLastIterVariableOffset) = IL; } void setLowerBoundVariable(Expr *LB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), LowerBoundVariableOffset) = LB; } void setUpperBoundVariable(Expr *UB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), UpperBoundVariableOffset) = UB; } void setStrideVariable(Expr *ST) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), StrideVariableOffset) = ST; } void setEnsureUpperBound(Expr *EUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), EnsureUpperBoundOffset) = EUB; } void setNextLowerBound(Expr *NLB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextLowerBoundOffset) = NLB; } void setNextUpperBound(Expr *NUB) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) 
|| isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NextUpperBoundOffset) = NUB; } void setNumIterations(Expr *NI) { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); *std::next(child_begin(), NumIterationsOffset) = NI; } void setPrevLowerBoundVariable(Expr *PrevLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevLowerBoundVariableOffset) = PrevLB; } void setPrevUpperBoundVariable(Expr *PrevUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevUpperBoundVariableOffset) = PrevUB; } void setDistInc(Expr *DistInc) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), DistIncOffset) = DistInc; } void setPrevEnsureUpperBound(Expr *PrevEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), PrevEnsureUpperBoundOffset) = PrevEUB; } void setCombinedLowerBoundVariable(Expr *CombLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedLowerBoundVariableOffset) = CombLB; } void setCombinedUpperBoundVariable(Expr *CombUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedUpperBoundVariableOffset) = CombUB; } void setCombinedEnsureUpperBound(Expr *CombEUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedEnsureUpperBoundOffset) = CombEUB; } void setCombinedInit(Expr *CombInit) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedInitOffset) = CombInit; } void setCombinedCond(Expr *CombCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedConditionOffset) = CombCond; } void setCombinedNextLowerBound(Expr *CombNLB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextLowerBoundOffset) = CombNLB; } void setCombinedNextUpperBound(Expr *CombNUB) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); *std::next(child_begin(), CombinedNextUpperBoundOffset) = CombNUB; } void setCombinedDistCond(Expr *CombDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedDistConditionOffset) = CombDistCond; } void setCombinedParForInDistCond(Expr *CombParForInDistCond) { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); *std::next(child_begin(), CombinedParForInDistConditionOffset) = CombParForInDistCond; } void setCounters(ArrayRef<Expr *> A); void setPrivateCounters(ArrayRef<Expr *> A); void setInits(ArrayRef<Expr *> A); void setUpdates(ArrayRef<Expr *> A); 
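// Layout sketch, derived from the offsets enum and numLoopChildren() above
// (illustrative only, not normative): for a worksharing directive such as
// '#pragma omp for collapse(2)', getArraysOffset() == WorksharingEnd == 17
// and numLoopChildren(2, OMPD_for) == 17 + 8 * 2 == 33, so the trailing
// child storage holds the 17 fixed slots (AssociatedStmt .. NumIterations)
// followed by eight length-2 arrays:
//   children[17..18] Counters           children[19..20] PrivateCounters
//   children[21..22] Inits              children[23..24] Updates
//   children[25..26] Finals             children[27..28] DependentCounters
//   children[29..30] DependentInits     children[31..32] FinalsConditions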
void setFinals(ArrayRef<Expr *> A); void setDependentCounters(ArrayRef<Expr *> A); void setDependentInits(ArrayRef<Expr *> A); void setFinalsConditions(ArrayRef<Expr *> A); public: /// The expressions built to support OpenMP loops in combined/composite /// pragmas (e.g. pragma omp distribute parallel for) struct DistCombinedHelperExprs { /// DistributeLowerBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *LB; /// DistributeUpperBound - used when composing 'omp distribute' with /// 'omp for' in a same construct. Expr *UB; /// DistributeEnsureUpperBound - used when composing 'omp distribute' /// with 'omp for' in a same construct, EUB depends on DistUB Expr *EUB; /// Distribute loop iteration variable init used when composing 'omp /// distribute' /// with 'omp for' in a same construct Expr *Init; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct Expr *Cond; /// Update of LowerBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NLB; /// Update of UpperBound for statically scheduled omp loops for /// outer loop in combined constructs (e.g. 'distribute parallel for') Expr *NUB; /// Distribute Loop condition used when composing 'omp distribute' /// with 'omp for' in a same construct when schedule is chunked. Expr *DistCond; /// 'omp parallel for' loop condition used when composed with /// 'omp distribute' in the same construct and when schedule is /// chunked and the chunk size is 1. Expr *ParForInDistCond; }; /// The expressions built for the OpenMP loop CodeGen for the /// whole collapsed loop nest. struct HelperExprs { /// Loop iteration variable. Expr *IterationVarRef; /// Loop last iteration number. Expr *LastIteration; /// Loop number of iterations. Expr *NumIterations; /// Calculation of last iteration. Expr *CalcLastIteration; /// Loop pre-condition. Expr *PreCond; /// Loop condition. Expr *Cond; /// Loop iteration variable init. Expr *Init; /// Loop increment. Expr *Inc; /// IsLastIteration - local flag variable passed to runtime. Expr *IL; /// LowerBound - local variable passed to runtime. Expr *LB; /// UpperBound - local variable passed to runtime. Expr *UB; /// Stride - local variable passed to runtime. Expr *ST; /// EnsureUpperBound -- expression UB = min(UB, NumIterations). Expr *EUB; /// Update of LowerBound for statically scheduled 'omp for' loops. Expr *NLB; /// Update of UpperBound for statically scheduled 'omp for' loops. Expr *NUB; /// PreviousLowerBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevLB; /// PreviousUpperBound - local variable passed to runtime in the /// enclosing schedule or null if that does not apply. Expr *PrevUB; /// DistInc - increment expression for distribute loop when found /// combined with a further loop level (e.g. in 'distribute parallel for') /// expression IV = IV + ST Expr *DistInc; /// PrevEUB - expression similar to EUB but to be used when loop /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for' /// when ensuring that the UB is either the calculated UB by the runtime or /// the end of the assigned distribute chunk) /// expression UB = min (UB, PrevUB) Expr *PrevEUB; /// Counters Loop counters. SmallVector<Expr *, 4> Counters; /// PrivateCounters Loop counters. SmallVector<Expr *, 4> PrivateCounters; /// Expressions for loop counters inits for CodeGen. 
SmallVector<Expr *, 4> Inits; /// Expressions for loop counters update for CodeGen. SmallVector<Expr *, 4> Updates; /// Final loop counter values for CodeGen. SmallVector<Expr *, 4> Finals; /// List of counters required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentCounters; /// List of initializers required for the generation of the non-rectangular /// loops. SmallVector<Expr *, 4> DependentInits; /// List of final conditions required for the generation of the /// non-rectangular loops. SmallVector<Expr *, 4> FinalsConditions; /// Init statement for all captured expressions. Stmt *PreInits; /// Expressions used when combining OpenMP loop pragmas DistCombinedHelperExprs DistCombinedFields; /// Check if all the expressions are built (does not check the /// worksharing ones). bool builtAll() { return IterationVarRef != nullptr && LastIteration != nullptr && NumIterations != nullptr && PreCond != nullptr && Cond != nullptr && Init != nullptr && Inc != nullptr; } /// Initialize all the fields to null. /// \param Size Number of elements in the /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions /// arrays. void clear(unsigned Size) { IterationVarRef = nullptr; LastIteration = nullptr; CalcLastIteration = nullptr; PreCond = nullptr; Cond = nullptr; Init = nullptr; Inc = nullptr; IL = nullptr; LB = nullptr; UB = nullptr; ST = nullptr; EUB = nullptr; NLB = nullptr; NUB = nullptr; NumIterations = nullptr; PrevLB = nullptr; PrevUB = nullptr; DistInc = nullptr; PrevEUB = nullptr; Counters.resize(Size); PrivateCounters.resize(Size); Inits.resize(Size); Updates.resize(Size); Finals.resize(Size); DependentCounters.resize(Size); DependentInits.resize(Size); FinalsConditions.resize(Size); for (unsigned i = 0; i < Size; ++i) { Counters[i] = nullptr; PrivateCounters[i] = nullptr; Inits[i] = nullptr; Updates[i] = nullptr; Finals[i] = nullptr; DependentCounters[i] = nullptr; DependentInits[i] = nullptr; FinalsConditions[i] = nullptr; } PreInits = nullptr; DistCombinedFields.LB = nullptr; DistCombinedFields.UB = nullptr; DistCombinedFields.EUB = nullptr; DistCombinedFields.Init = nullptr; DistCombinedFields.Cond = nullptr; DistCombinedFields.NLB = nullptr; DistCombinedFields.NUB = nullptr; DistCombinedFields.DistCond = nullptr; DistCombinedFields.ParForInDistCond = nullptr; } }; /// Get number of collapsed loops.
unsigned getCollapsedNumber() const { return CollapsedNum; } Expr *getIterationVariable() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IterationVariableOffset))); } Expr *getLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LastIterationOffset))); } Expr *getCalcLastIteration() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CalcLastIterationOffset))); } Expr *getPreCond() const { return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PreConditionOffset))); } Expr *getCond() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), CondOffset))); } Expr *getInit() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), InitOffset))); } Expr *getInc() const { return const_cast<Expr *>( reinterpret_cast<const Expr *>(*std::next(child_begin(), IncOffset))); } const Stmt *getPreInits() const { return *std::next(child_begin(), PreInitsOffset); } Stmt *getPreInits() { return *std::next(child_begin(), PreInitsOffset); } Expr *getIsLastIterVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), IsLastIterVariableOffset))); } Expr *getLowerBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), LowerBoundVariableOffset))); } Expr *getUpperBoundVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), UpperBoundVariableOffset))); } Expr *getStrideVariable() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), StrideVariableOffset))); } Expr *getEnsureUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), EnsureUpperBoundOffset))); } Expr *getNextLowerBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextLowerBoundOffset))); } Expr *getNextUpperBound() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr 
*>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NextUpperBoundOffset))); } Expr *getNumIterations() const { assert((isOpenMPWorksharingDirective(getDirectiveKind()) || isOpenMPTaskLoopDirective(getDirectiveKind()) || isOpenMPDistributeDirective(getDirectiveKind())) && "expected worksharing loop directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), NumIterationsOffset))); } Expr *getPrevLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevLowerBoundVariableOffset))); } Expr *getPrevUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevUpperBoundVariableOffset))); } Expr *getDistInc() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), DistIncOffset))); } Expr *getPrevEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), PrevEnsureUpperBoundOffset))); } Expr *getCombinedLowerBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedLowerBoundVariableOffset))); } Expr *getCombinedUpperBoundVariable() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedUpperBoundVariableOffset))); } Expr *getCombinedEnsureUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedEnsureUpperBoundOffset))); } Expr *getCombinedInit() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedInitOffset))); } Expr *getCombinedCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedConditionOffset))); } Expr *getCombinedNextLowerBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextLowerBoundOffset))); } Expr *getCombinedNextUpperBound() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedNextUpperBoundOffset))); } Expr *getCombinedDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( 
*std::next(child_begin(), CombinedDistConditionOffset))); } Expr *getCombinedParForInDistCond() const { assert(isOpenMPLoopBoundSharingDirective(getDirectiveKind()) && "expected loop bound distribute sharing directive"); return const_cast<Expr *>(reinterpret_cast<const Expr *>( *std::next(child_begin(), CombinedParForInDistConditionOffset))); } const Stmt *getBody() const { // This relies on the loop form having already been checked by Sema. const Stmt *Body = getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers(); if (auto *For = dyn_cast<ForStmt>(Body)) { Body = For->getBody(); } else { assert(isa<CXXForRangeStmt>(Body) && "Expected canonical for loop or range-based for loop."); Body = cast<CXXForRangeStmt>(Body)->getBody(); } for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) { Body = Body->IgnoreContainers(); if (auto *For = dyn_cast<ForStmt>(Body)) { Body = For->getBody(); } else { assert(isa<CXXForRangeStmt>(Body) && "Expected canonical for loop or range-based for loop."); Body = cast<CXXForRangeStmt>(Body)->getBody(); } } return Body; } ArrayRef<Expr *> counters() { return getCounters(); } ArrayRef<Expr *> counters() const { return const_cast<OMPLoopDirective *>(this)->getCounters(); } ArrayRef<Expr *> private_counters() { return getPrivateCounters(); } ArrayRef<Expr *> private_counters() const { return const_cast<OMPLoopDirective *>(this)->getPrivateCounters(); } ArrayRef<Expr *> inits() { return getInits(); } ArrayRef<Expr *> inits() const { return const_cast<OMPLoopDirective *>(this)->getInits(); } ArrayRef<Expr *> updates() { return getUpdates(); } ArrayRef<Expr *> updates() const { return const_cast<OMPLoopDirective *>(this)->getUpdates(); } ArrayRef<Expr *> finals() { return getFinals(); } ArrayRef<Expr *> finals() const { return const_cast<OMPLoopDirective *>(this)->getFinals(); } ArrayRef<Expr *> dependent_counters() { return getDependentCounters(); } ArrayRef<Expr *> dependent_counters() const { return const_cast<OMPLoopDirective *>(this)->getDependentCounters(); } ArrayRef<Expr *> dependent_inits() { return getDependentInits(); } ArrayRef<Expr *> dependent_inits() const { return const_cast<OMPLoopDirective *>(this)->getDependentInits(); } ArrayRef<Expr *> finals_conditions() { return getFinalsConditions(); } ArrayRef<Expr *> finals_conditions() const { return const_cast<OMPLoopDirective *>(this)->getFinalsConditions(); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass || T->getStmtClass() == OMPForDirectiveClass || T->getStmtClass() == OMPForSimdDirectiveClass || T->getStmtClass() == OMPParallelForDirectiveClass || T->getStmtClass() == OMPParallelForSimdDirectiveClass || T->getStmtClass() == OMPTaskLoopDirectiveClass || T->getStmtClass() == OMPTaskLoopSimdDirectiveClass || T->getStmtClass() == OMPMasterTaskLoopDirectiveClass || T->getStmtClass() == OMPDistributeDirectiveClass || T->getStmtClass() == OMPTargetParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForDirectiveClass || T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPDistributeSimdDirectiveClass || T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass || T->getStmtClass() ==
OMPTargetTeamsDistributeParallelForDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass || T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp simd' directive. /// /// \code /// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPSimdDirectiveClass; } }; /// This represents '#pragma omp for' directive. /// /// \code /// #pragma omp for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for' has clauses 'private' with the /// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c' /// and 'd'. /// class OMPForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPForDirectiveClass; } }; /// This represents '#pragma omp for simd' directive. /// /// \code /// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp for simd' has clauses 'private' /// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and /// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'. /// class OMPForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. 
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};

/// This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};

/// This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// true if current directive has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1),
        HasCancel(false) {}

  /// Build an empty directive.
  ///
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1),
        HasCancel(false) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};

/// This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};

/// This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
/// OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, StartLoc, EndLoc, 0, 1) {} /// Build an empty directive. /// explicit OMPMasterDirective() : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master, SourceLocation(), SourceLocation(), 0, 1) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPMasterDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterDirectiveClass; } }; /// This represents '#pragma omp critical' directive. /// /// \code /// #pragma omp critical /// \endcode /// class OMPCriticalDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Name of the directive. DeclarationNameInfo DirName; /// Build directive with the given start and end location. /// /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, StartLoc, EndLoc, NumClauses, 1), DirName(Name) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPCriticalDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical, SourceLocation(), SourceLocation(), NumClauses, 1), DirName() {} /// Set name of the directive. /// /// \param Name Name of the directive. /// void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; } public: /// Creates directive. /// /// \param C AST context. /// \param Name Name of the directive. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPCriticalDirective * Create(const ASTContext &C, const DeclarationNameInfo &Name, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPCriticalDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return name of the directive. /// DeclarationNameInfo getDirectiveName() const { return DirName; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCriticalDirectiveClass; } }; /// This represents '#pragma omp parallel for' directive. /// /// \code /// #pragma omp parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for' has clauses 'private' /// with the variables 'a' and 'b' and 'reduction' with operator '+' and /// variables 'c' and 'd'. /// class OMPParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. 
bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForDirectiveClass; } }; /// This represents '#pragma omp parallel for simd' directive. /// /// \code /// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel for simd' has clauses /// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j' /// and linear step 's', 'reduction' with operator '+' and variables 'c' and /// 'd'. /// class OMPParallelForSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass, OMPD_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp parallel sections' directive. /// /// \code /// #pragma omp parallel sections private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp parallel sections' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPParallelSectionsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if current directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPParallelSectionsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass, OMPD_parallel_sections, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPParallelSectionsDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPParallelSectionsDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. 
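  ///
  /// A minimal usage sketch (\c S here is a hypothetical 'const Stmt *'
  /// supplied by the caller); the flag is typically queried after
  /// dispatching on the statement class via \c dyn_cast, which in turn
  /// relies on \c classof below:
  /// \code
  /// if (const auto *D = dyn_cast<OMPParallelSectionsDirective>(S))
  ///   if (D->hasCancel())
  ///     ; // the associated region may be cancelled
  /// \endcode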
bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPParallelSectionsDirectiveClass; } }; /// This represents '#pragma omp task' directive. /// /// \code /// #pragma omp task private(a,b) final(d) /// \endcode /// In this example directive '#pragma omp task' has clauses 'private' with the /// variables 'a' and 'b' and 'final' with condition 'd'. /// class OMPTaskDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// true if this directive has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc, EndLoc, NumClauses, 1), HasCancel(false) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTaskDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, SourceLocation(), SourceLocation(), NumClauses, 1), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param HasCancel true, if current directive has inner cancel directive. /// static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskDirectiveClass; } }; /// This represents '#pragma omp taskyield' directive. /// /// \code /// #pragma omp taskyield /// \endcode /// class OMPTaskyieldDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskyieldDirective() : OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskyieldDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. 
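  ///
  /// A deserialization sketch (\c Context and \c Empty are caller-provided
  /// placeholders): empty directives are built first and their pieces are
  /// filled in afterwards, e.g. by the AST reader:
  /// \code
  /// Stmt::EmptyShell Empty;
  /// OMPTaskyieldDirective *D =
  ///     OMPTaskyieldDirective::CreateEmpty(Context, Empty);
  /// \endcode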
/// static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskyieldDirectiveClass; } }; /// This represents '#pragma omp barrier' directive. /// /// \code /// #pragma omp barrier /// \endcode /// class OMPBarrierDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPBarrierDirective() : OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPBarrierDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPBarrierDirectiveClass; } }; /// This represents '#pragma omp taskwait' directive. /// /// \code /// #pragma omp taskwait /// \endcode /// class OMPTaskwaitDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc) : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, StartLoc, EndLoc, 0, 0) {} /// Build an empty directive. /// explicit OMPTaskwaitDirective() : OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait, SourceLocation(), SourceLocation(), 0, 0) {} public: /// Creates directive. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// static OMPTaskwaitDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates an empty directive. /// /// \param C AST context. /// static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskwaitDirectiveClass; } }; /// This represents '#pragma omp taskgroup' directive. /// /// \code /// #pragma omp taskgroup /// \endcode /// class OMPTaskgroupDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup, StartLoc, EndLoc, NumClauses, 2) {} /// Build an empty directive. /// \param NumClauses Number of clauses. 
  ///
  explicit OMPTaskgroupDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
                               SourceLocation(), SourceLocation(), NumClauses,
                               2) {}

  /// Sets the task_reduction return variable.
  void setReductionRef(Expr *RR) { *std::next(child_begin(), 1) = RR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param ReductionRef Reference to the task_reduction return variable.
  ///
  static OMPTaskgroupDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
         Expr *ReductionRef);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C,
                                            unsigned NumClauses, EmptyShell);

  /// Returns reference to the task_reduction return variable.
  const Expr *getReductionRef() const {
    return static_cast<const Expr *>(*std::next(child_begin(), 1));
  }
  Expr *getReductionRef() {
    return static_cast<Expr *>(*std::next(child_begin(), 1));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskgroupDirectiveClass;
  }
};

/// This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments: variables 'a'
/// and 'b'.
/// The 'omp flush' directive does not have clauses, but it may have an optional
/// list of variables to flush. This list of variables is stored within a fake
/// clause, FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                    unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               StartLoc, EndLoc, NumClauses, 0) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPFlushDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0) {}

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses (only single OMPFlushClause clause is
  /// allowed).
  ///
  static OMPFlushDirective *Create(const ASTContext &C,
                                   SourceLocation StartLoc,
                                   SourceLocation EndLoc,
                                   ArrayRef<OMPClause *> Clauses);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPFlushDirective *CreateEmpty(const ASTContext &C,
                                        unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPFlushDirectiveClass;
  }
};

/// This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPOrderedDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPOrderedDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses, EmptyShell);

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPOrderedDirectiveClass;
  }
};

/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;

  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first (postfix) form of the expression and
  /// false otherwise.
  bool IsPostfixUpdate;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Set 'x' part of the associated expression/statement.
  void setX(Expr *X) { *std::next(child_begin()) = X; }

  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }

  /// Set 'v' part of the associated expression/statement.
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }

  /// Set 'expr' part of the associated expression/statement.
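  ///
  /// For example (an illustrative mapping, not an exhaustive one), given
  /// \code
  /// #pragma omp atomic capture
  /// {v = x; x = x + 1;}
  /// \endcode
  /// 'x' is 'x', 'v' is 'v' and 'expr' is '1'; the capture is the postfix
  /// form, so \a IsPostfixUpdate is true ('v' receives the old value of 'x').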
void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; } public: /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr' /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for /// detailed description of 'x', 'v' and 'expr'). /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param X 'x' part of the associated expression/statement. /// \param V 'v' part of the associated expression/statement. /// \param E 'expr' part of the associated expression/statement. /// \param UE Helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the /// second. /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. static OMPAtomicDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V, Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPAtomicDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get 'x' part of the associated expression/statement. Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); } const Expr *getX() const { return cast_or_null<Expr>(*std::next(child_begin())); } /// Get helper expression of the form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. Expr *getUpdateExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } const Expr *getUpdateExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 2)); } /// Return true if helper update expression has form /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'. bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; } /// Return true if 'v' expression must be updated to original value of /// 'x', false if 'v' must be updated to the new value of 'x'. bool isPostfixUpdate() const { return IsPostfixUpdate; } /// Get 'v' part of the associated expression/statement. Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } const Expr *getV() const { return cast_or_null<Expr>(*std::next(child_begin(), 3)); } /// Get 'expr' part of the associated expression/statement. Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } const Expr *getExpr() const { return cast_or_null<Expr>(*std::next(child_begin(), 4)); } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPAtomicDirectiveClass; } }; /// This represents '#pragma omp target' directive. /// /// \code /// #pragma omp target if(a) /// \endcode /// In this example directive '#pragma omp target' has clause 'if' with /// condition 'a'. /// class OMPTargetDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. 
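  ///
  /// Note that this constructor is private: clients are expected to build
  /// the node through \a Create instead, e.g. (a sketch; \c Ctx, \c Clauses
  /// and \c CS are caller-provided placeholders):
  /// \code
  /// OMPTargetDirective *D =
  ///     OMPTargetDirective::Create(Ctx, StartLoc, EndLoc, Clauses, CS);
  /// \endcode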
/// OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDirectiveClass; } }; /// This represents '#pragma omp target data' directive. /// /// \code /// #pragma omp target data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target data' has clauses 'device' /// with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetDataDirectiveClass, OMPD_target_data, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetDataDirectiveClass; } }; /// This represents '#pragma omp target enter data' directive. /// /// \code /// #pragma omp target enter data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target enter data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. 
/// class OMPTargetEnterDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetEnterDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass, OMPD_target_enter_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetEnterDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. /// static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetEnterDataDirectiveClass; } }; /// This represents '#pragma omp target exit data' directive. /// /// \code /// #pragma omp target exit data device(0) if(a) map(b[:]) /// \endcode /// In this example directive '#pragma omp target exit data' has clauses /// 'device' with the value '0', 'if' with condition 'a' and 'map' with array /// section 'b[:]'. /// class OMPTargetExitDataDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. /// OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetExitDataDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass, OMPD_target_exit_data, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetExitDataDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a N clauses. /// /// \param C AST context. /// \param N The number of clauses. 
/// static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C, unsigned N, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetExitDataDirectiveClass; } }; /// This represents '#pragma omp target parallel' directive. /// /// \code /// #pragma omp target parallel if(a) /// \endcode /// In this example directive '#pragma omp target parallel' has clause 'if' with /// condition 'a'. /// class OMPTargetParallelDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, StartLoc, EndLoc, NumClauses, /*NumChildren=*/1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetParallelDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetParallelDirectiveClass, OMPD_target_parallel, SourceLocation(), SourceLocation(), NumClauses, /*NumChildren=*/1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetParallelDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetParallelDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelDirectiveClass; } }; /// This represents '#pragma omp target parallel for' directive. /// /// \code /// #pragma omp target parallel for private(a,b) reduction(+:c,d) /// \endcode /// In this example directive '#pragma omp target parallel for' has clauses /// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+' /// and variables 'c' and 'd'. /// class OMPTargetParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if current region has inner cancel directive. bool HasCancel; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, OMPD_target_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass, OMPD_target_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if current directive has inner cancel directive. /// static OMPTargetParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForDirectiveClass; } }; /// This represents '#pragma omp teams' directive. /// /// \code /// #pragma omp teams if(a) /// \endcode /// In this example directive '#pragma omp teams' has clause 'if' with /// condition 'a'. /// class OMPTeamsDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDirectiveClass; } }; /// This represents '#pragma omp cancellation point' directive. 
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for the innermost 'for'
/// region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};

/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for the innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
/// static OMPCancelDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); /// Get cancellation region for the current cancellation point. OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPCancelDirectiveClass; } }; /// This represents '#pragma omp taskloop' directive. /// /// \code /// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopDirectiveClass; } }; /// This represents '#pragma omp taskloop simd' directive. /// /// \code /// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp taskloop simd' has clauses 'private' /// with the variables 'a' and 'b', 'grainsize' with expression 'val' and /// 'num_tasks' with expression 'num'. /// class OMPTaskLoopSimdDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass, OMPD_taskloop_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTaskLoopSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass; } }; /// This represents '#pragma omp master taskloop' directive. /// /// \code /// #pragma omp master taskloop private(a,b) grainsize(val) num_tasks(num) /// \endcode /// In this example directive '#pragma omp master taskloop' has clauses /// 'private' with the variables 'a' and 'b', 'grainsize' with expression 'val' /// and 'num_tasks' with expression 'num'. /// class OMPMasterTaskLoopDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPMasterTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, OMPD_master_taskloop, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPMasterTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPMasterTaskLoopDirectiveClass, OMPD_master_taskloop, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
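  ///
  /// A construction sketch (\c Ctx, \c Clauses, \c Body and \c Exprs stand
  /// in for values the caller has already built):
  /// \code
  /// OMPMasterTaskLoopDirective *D = OMPMasterTaskLoopDirective::Create(
  ///     Ctx, StartLoc, EndLoc, /*CollapsedNum=*/1, Clauses, Body, Exprs);
  /// \endcode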
/// static OMPMasterTaskLoopDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPMasterTaskLoopDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPMasterTaskLoopDirectiveClass; } }; /// This represents '#pragma omp distribute' directive. /// /// \code /// #pragma omp distribute private(a,b) /// \endcode /// In this example directive '#pragma omp distribute' has clauses 'private' /// with the variables 'a' and 'b' /// class OMPDistributeDirective : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeDirectiveClass; } }; /// This represents '#pragma omp target update' directive. /// /// \code /// #pragma omp target update to(a) from(b) device(1) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' with /// argument 'a', clause 'from' with argument 'b' and clause 'device' with /// argument '1'. /// class OMPTargetUpdateDirective : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param NumClauses The number of clauses. 
/// OMPTargetUpdateDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetUpdateDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetUpdateDirectiveClass, OMPD_target_update, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetUpdateDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses /// clauses. /// /// \param C AST context. /// \param NumClauses The number of clauses. /// static OMPTargetUpdateDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetUpdateDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for' composite /// directive. /// /// \code /// #pragma omp distribute parallel for private(a,b) /// \endcode /// In this example directive '#pragma omp distribute parallel for' has clause /// 'private' with the variables 'a' and 'b' /// class OMPDistributeParallelForDirective : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForDirectiveClass, OMPD_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. 
/// static OMPDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp distribute parallel for simd' has /// clause 'private' with the variables 'x' /// class OMPDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeParallelForSimdDirectiveClass, OMPD_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeParallelForSimdDirective *Create( const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeParallelForSimdDirective *CreateEmpty( const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp distribute simd' composite directive. 
/// /// \code /// #pragma omp distribute simd private(x) /// \endcode /// In this example directive '#pragma omp distribute simd' has clause /// 'private' with the variables 'x' /// class OMPDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPDistributeSimdDirectiveClass, OMPD_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp target parallel for simd' directive. /// /// \code /// #pragma omp target parallel for simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target parallel for simd' has clauses /// 'private' with the variable 'a', 'map' with the variable 'b' and 'safelen' /// with the variable 'c'. /// class OMPTargetParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// explicit OMPTargetParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetParallelForSimdDirectiveClass, OMPD_target_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetParallelForSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target simd' directive. /// /// \code /// #pragma omp target simd private(a) map(b) safelen(c) /// \endcode /// In this example directive '#pragma omp target simd' has clauses 'private' /// with the variable 'a', 'map' with the variable 'b' and 'safelen' with /// the variable 'c'. /// class OMPTargetSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetSimdDirectiveClass, OMPD_target_simd, SourceLocation(),SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// static OMPTargetSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute' directive. /// /// \code /// #pragma omp teams distribute private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute' has clauses /// 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeDirectiveClass, OMPD_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp teams distribute simd' /// combined directive. /// /// \code /// #pragma omp teams distribute simd private(a,b) /// \endcode /// In this example directive '#pragma omp teams distribute simd' /// has clause 'private' with the variables 'a' and 'b' /// class OMPTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. 
/// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeSimdDirectiveClass, OMPD_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place /// for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for simd' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for simd' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForSimdDirectiveClass, OMPD_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. 
/// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp teams distribute parallel for' composite /// directive. /// /// \code /// #pragma omp teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp teams distribute parallel for' /// has clause 'private' with the variables 'x' /// class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTeamsDistributeParallelForDirectiveClass, OMPD_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams' directive. /// /// \code /// #pragma omp target teams if(a>0) /// \endcode /// In this example directive '#pragma omp target teams' has clause 'if' with /// condition 'a>0'. 
/// class OMPTargetTeamsDirective final : public OMPExecutableDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, StartLoc, EndLoc, NumClauses, 1) {} /// Build an empty directive. /// /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDirective(unsigned NumClauses) : OMPExecutableDirective(this, OMPTargetTeamsDirectiveClass, OMPD_target_teams, SourceLocation(), SourceLocation(), NumClauses, 1) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// static OMPTargetTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDirectiveClass; } }; /// This represents '#pragma omp target teams distribute' combined directive. /// /// \code /// #pragma omp target teams distribute private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute' has clause /// 'private' with the variables 'x' /// class OMPTargetTeamsDistributeDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeDirectiveClass, OMPD_target_teams_distribute, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. 
/// static OMPTargetTeamsDistributeDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for' combined /// directive. /// /// \code /// #pragma omp target teams distribute parallel for private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// true if the construct has inner cancel directive. bool HasCancel = false; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, StartLoc, EndLoc, CollapsedNum, NumClauses), HasCancel(false) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForDirectiveClass, OMPD_target_teams_distribute_parallel_for, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses), HasCancel(false) {} /// Set cancel state. void setHasCancel(bool Has) { HasCancel = Has; } public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// \param HasCancel true if this directive has inner cancel directive. /// static OMPTargetTeamsDistributeParallelForDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); /// Return true if current directive has inner cancel directive. 
bool hasCancel() const { return HasCancel; } static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForDirectiveClass; } }; /// This represents '#pragma omp target teams distribute parallel for simd' /// combined directive. /// /// \code /// #pragma omp target teams distribute parallel for simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute parallel /// for simd' has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeParallelForSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// OMPTargetTeamsDistributeParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeParallelForSimdDirective( unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective( this, OMPTargetTeamsDistributeParallelForSimdDirectiveClass, OMPD_target_teams_distribute_parallel_for_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeParallelForSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeParallelForSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeParallelForSimdDirectiveClass; } }; /// This represents '#pragma omp target teams distribute simd' combined /// directive. /// /// \code /// #pragma omp target teams distribute simd private(x) /// \endcode /// In this example directive '#pragma omp target teams distribute simd' /// has clause 'private' with the variables 'x' /// class OMPTargetTeamsDistributeSimdDirective final : public OMPLoopDirective { friend class ASTStmtReader; /// Build directive with the given start and end location. /// /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending location of the directive. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. 
/// OMPTargetTeamsDistributeSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, StartLoc, EndLoc, CollapsedNum, NumClauses) {} /// Build an empty directive. /// /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// explicit OMPTargetTeamsDistributeSimdDirective(unsigned CollapsedNum, unsigned NumClauses) : OMPLoopDirective(this, OMPTargetTeamsDistributeSimdDirectiveClass, OMPD_target_teams_distribute_simd, SourceLocation(), SourceLocation(), CollapsedNum, NumClauses) {} public: /// Creates directive with a list of \a Clauses. /// /// \param C AST context. /// \param StartLoc Starting location of the directive kind. /// \param EndLoc Ending Location of the directive. /// \param CollapsedNum Number of collapsed loops. /// \param Clauses List of clauses. /// \param AssociatedStmt Statement, associated with the directive. /// \param Exprs Helper expressions for CodeGen. /// static OMPTargetTeamsDistributeSimdDirective * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc, unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, const HelperExprs &Exprs); /// Creates an empty directive with the place for \a NumClauses clauses. /// /// \param C AST context. /// \param CollapsedNum Number of collapsed nested loops. /// \param NumClauses Number of clauses. /// static OMPTargetTeamsDistributeSimdDirective * CreateEmpty(const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum, EmptyShell); static bool classof(const Stmt *T) { return T->getStmtClass() == OMPTargetTeamsDistributeSimdDirectiveClass; } }; } // end namespace clang #endif
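// All of the directive classes above follow LLVM's hand-rolled RTTI
// convention: each object stores a StmtClass tag, set by the constructor,
// and exposes a static classof(const Stmt *) that tests the tag; this is
// what llvm::isa<> and llvm::dyn_cast<> call under the hood.  A minimal,
// self-contained sketch of that pattern (stub types for illustration,
// not the real clang::Stmt hierarchy):

#include <cassert>

namespace sketch {

enum StmtClass { OMPTaskLoopDirectiveClass, OMPDistributeDirectiveClass };

class Stmt {
  StmtClass Class;

public:
  explicit Stmt(StmtClass SC) : Class(SC) {}
  StmtClass getStmtClass() const { return Class; }
};

class OMPTaskLoopDirective : public Stmt {
public:
  OMPTaskLoopDirective() : Stmt(OMPTaskLoopDirectiveClass) {}

  // Same shape as the classof() members declared in the header above.
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};

} // namespace sketch

int main() {
  sketch::OMPTaskLoopDirective D;
  sketch::Stmt *S = &D;
  // llvm::isa<OMPTaskLoopDirective>(S) reduces to exactly this call.
  assert(sketch::OMPTaskLoopDirective::classof(S));
  return 0;
}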
1.c
#include <omp.h>
#include <stdio.h>

int main() {
#pragma omp parallel num_threads(2)
  for (int i = 0; i < 100; i++) {
    int id = omp_get_thread_num();
    printf("T%d:i%d ", id, i);
    fflush(stdout);
  }
}
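/* Note on the pragma above: it is a plain "parallel" region, not the
   worksharing "parallel for" construct, so the loop is replicated:
   each of the 2 threads executes all 100 iterations (200 interleaved
   prints in total).  To split the 100 iterations between the two
   threads instead, only the pragma would change:

       #pragma omp parallel for num_threads(2)
*/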
quantize.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%              QQQ   U   U   AAA   N   N  TTTTT  IIIII   ZZZZZ  EEEEE         %
%             Q   Q  U   U  A   A  NN  N    T      I        ZZ  E             %
%             Q   Q  U   U  AAAAA  N N N    T      I      ZZZ   EEEEE         %
%             Q  QQ  U   U  A   A  N  NN    T      I     ZZ     E             %
%              QQQQ   UUU   A   A  N   N    T    IIIII   ZZZZZ  EEEEE         %
%                                                                             %
%                                                                             %
%    MagickCore Methods to Reduce the Number of Unique Colors in an Image     %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 July 1992                                   %
%                                                                             %
%                                                                             %
%  Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and        %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Realism in computer graphics typically requires using 24 bits/pixel to
%  generate an image.  Yet many graphic display devices do not contain the
%  amount of memory necessary to match the spatial and color resolution of
%  the human eye.  The Quantize methods take a 24-bit image and reduce the
%  number of colors so it can be displayed on a raster device with fewer
%  bits per pixel.  In most instances, the quantized image closely
%  resembles the original reference image.
%
%  A reduction of colors in an image is also desirable for image
%  transmission and real-time animation.
%
%  QuantizeImage() takes standard RGB or monochrome images and quantizes
%  them down to some fixed number of colors.
%
%  For purposes of color allocation, an image is a set of n pixels, where
%  each pixel is a point in RGB space.  RGB space is a 3-dimensional
%  vector space, and each pixel, Pi, is defined by an ordered triple of
%  red, green, and blue coordinates, (Ri, Gi, Bi).
%
%  Each primary color component (red, green, or blue) represents an
%  intensity which varies linearly from 0 to a maximum value, Cmax, which
%  corresponds to full saturation of that color.  Color allocation is
%  defined over a domain consisting of the cube in RGB space with opposite
%  vertices at (0,0,0) and (Cmax, Cmax, Cmax).  QUANTIZE requires
%  Cmax = 255.
%
%  The algorithm maps this domain onto a tree in which each node
%  represents a cube within that domain.  In the following discussion
%  these cubes are defined by the coordinates of two opposite vertices:
%  the vertex nearest the origin in RGB space and the vertex farthest
%  from the origin.
%
%  The tree's root node represents the entire domain, (0,0,0) through
%  (Cmax,Cmax,Cmax).  Each lower level in the tree is generated by
%  subdividing one node's cube into eight smaller cubes of equal size.
%  This corresponds to bisecting the parent cube with planes passing
%  through the midpoints of each edge.
%
%  The basic algorithm operates in three phases: Classification,
%  Reduction, and Assignment.  Classification builds a color description
%  tree for the image.  Reduction collapses the tree until the number of
%  colors it represents is, at most, the number of colors desired in the
%  output image.  Assignment defines the output image's color map and
%  sets each pixel's color by reclassification in the reduced tree.
%  Our goal is to minimize the numerical discrepancies between the
%  original colors and quantized colors (quantization error).
%
%  Classification begins by initializing a color description tree of
%  sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the classification phase for realistic values of Cmax.  If
%  color components in the input image are quantized to k-bit precision,
%  so that Cmax = 2^k-1, the tree would need k levels below the root node
%  to allow representing each possible input color in a leaf.  This
%  becomes prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed; (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, classification scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree; initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for all
%    pixels not classified at a lower depth.  The combination of these
%    sums and n2 will ultimately characterize the mean color of a set of
%    pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the
%    quantization error for a node.
%
%  Reduction repeatedly prunes the tree until the number of nodes with
%  n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E count is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When
%  n2 > 0 the node will uniquely define a color in the output image.  At
%  the beginning of reduction, n2 = 0 for all nodes except the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors within
%  the cubic volume which the node represents.
%  This includes n1 - n2 pixels whose colors should be defined by nodes
%  at a lower level in the tree.
%
%  Assignment generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  This method is based on a similar algorithm written by Paul Raveling.
%
*/

/*
  Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/histogram.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/quantize.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/string_.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#define CacheShift  2
#else
#define CacheShift  3
#endif
#define ErrorQueueLength  16
#define MaxNodes  266817
#define MaxTreeDepth  8
#define NodesInAList  1920

/*
  Typedef declarations.
*/
typedef struct _RealPixelPacket
{
  MagickRealType
    red,
    green,
    blue,
    opacity;
} RealPixelPacket;

typedef struct _NodeInfo
{
  struct _NodeInfo
    *parent,
    *child[16];

  MagickSizeType
    number_unique;

  RealPixelPacket
    total_color;

  MagickRealType
    quantize_error;

  size_t
    color_number,
    id,
    level;
} NodeInfo;

typedef struct _Nodes
{
  NodeInfo
    *nodes;

  struct _Nodes
    *next;
} Nodes;

typedef struct _CubeInfo
{
  NodeInfo
    *root;

  size_t
    colors,
    maximum_colors;

  ssize_t
    transparent_index;

  MagickSizeType
    transparent_pixels;

  RealPixelPacket
    target;

  MagickRealType
    distance,
    pruning_threshold,
    next_threshold;

  size_t
    nodes,
    free_nodes,
    color_number;

  NodeInfo
    *next_node;

  Nodes
    *node_queue;

  ssize_t
    *cache;

  RealPixelPacket
    error[ErrorQueueLength];

  MagickRealType
    weights[ErrorQueueLength];

  QuantizeInfo
    *quantize_info;

  MagickBooleanType
    associate_alpha;

  ssize_t
    x,
    y;

  size_t
    depth;

  MagickOffsetType
    offset;

  MagickSizeType
    span;
} CubeInfo;

/*
  Method prototypes.
*/
static CubeInfo
  *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t);

static NodeInfo
  *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *);

static MagickBooleanType
  AssignImageColors(Image *,CubeInfo *),
  ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *),
  DitherImage(Image *,CubeInfo *),
  SetGrayscaleImage(Image *);

static size_t
  DefineImageColormap(Image *,CubeInfo *,NodeInfo *);

static void
  ClosestColor(const Image *,CubeInfo *,const NodeInfo *),
  DestroyCubeInfo(CubeInfo *),
  PruneLevel(const Image *,CubeInfo *,const NodeInfo *),
  PruneToCubeDepth(const Image *,CubeInfo *,const NodeInfo *),
  ReduceImageColors(const Image *,CubeInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e Q u a n t i z e I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantizeInfo() allocates the QuantizeInfo structure.
%
%  The format of the AcquireQuantizeInfo method is:
%
%      QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
*/
MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info)
{
  QuantizeInfo
    *quantize_info;

  quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info));
  if (quantize_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(quantize_info);
  if (image_info != (ImageInfo *) NULL)
    {
      const char
        *option;

      quantize_info->dither=image_info->dither;
      option=GetImageOption(image_info,"dither");
      if (option != (const char *) NULL)
        quantize_info->dither_method=(DitherMethod) ParseCommandOption(
          MagickDitherOptions,MagickFalse,option);
      quantize_info->measure_error=image_info->verbose;
    }
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A s s i g n I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AssignImageColors() generates the output image from the pruned tree.  The
%  output image consists of two parts: (1) A color map, which is an array
%  of color descriptions (RGB triples) for each color present in the
%  output image; (2) A pixel array, which represents each pixel as an
%  index into the color map array.
%
%  First, the assignment phase makes one pass over the pruned color
%  description tree to establish the image's color map.  For each node
%  with n2 > 0, it divides Sr, Sg, and Sb by n2.  This produces the mean
%  color of all pixels that classify no lower than this node.  Each of
%  these colors becomes an entry in the color map.
%
%  Finally, the assignment phase reclassifies each pixel in the pruned
%  tree to identify the deepest node containing the pixel's color.  The
%  pixel's value in the pixel array becomes the index of this node's mean
%  color in the color map.
%
%  The format of the AssignImageColors() method is:
%
%      MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
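%
%  For example (illustrative numbers): a node with n2 = 4 and component
%  sums (Sr,Sg,Sb) = (1000,200,600) contributes the colormap entry
%  (1000/4,200/4,600/4) = (250,50,150), the mean color of the 4 pixels
%  that classified no lower than this node.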
% */ static inline void AssociateAlphaPixel(const CubeInfo *cube_info, const PixelPacket *pixel,RealPixelPacket *alpha_pixel) { MagickRealType alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->opacity == OpaqueOpacity)) { alpha_pixel->red=(MagickRealType) GetPixelRed(pixel); alpha_pixel->green=(MagickRealType) GetPixelGreen(pixel); alpha_pixel->blue=(MagickRealType) GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); return; } alpha=(MagickRealType) (QuantumScale*(QuantumRange-GetPixelOpacity(pixel))); alpha_pixel->red=alpha*GetPixelRed(pixel); alpha_pixel->green=alpha*GetPixelGreen(pixel); alpha_pixel->blue=alpha*GetPixelBlue(pixel); alpha_pixel->opacity=(MagickRealType) GetPixelOpacity(pixel); } static inline Quantum ClampPixel(const MagickRealType value) { if (value < 0.0f) return(0); if (value >= (MagickRealType) QuantumRange) return((Quantum) QuantumRange); #if !defined(MAGICKCORE_HDRI_SUPPORT) return((Quantum) (value+0.5f)); #else return(value); #endif } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const RealPixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(GetPixelRed(pixel))) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(GetPixelGreen(pixel))) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(GetPixelBlue(pixel))) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(GetPixelOpacity(pixel))) >> index) & 0x1) << 3; return(id); } static inline MagickBooleanType IsSameColor(const Image *image, const PixelPacket *p,const PixelPacket *q) { if ((GetPixelRed(p) != GetPixelRed(q)) || (GetPixelGreen(p) != GetPixelGreen(q)) || (GetPixelBlue(p) != GetPixelBlue(q))) return(MagickFalse); if ((image->matte != MagickFalse) && (GetPixelOpacity(p) != GetPixelOpacity(q))) return(MagickFalse); return(MagickTrue); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) { #define AssignImageTag "Assign/Image" ssize_t y; /* Allocate image colormap. */ if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace); if (AcquireImageColormap(image,cube_info->colors) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. 
*/ if ((cube_info->quantize_info->dither != MagickFalse) && (cube_info->quantize_info->dither_method != NoDitherMethod)) (void) DitherImage(image,cube_info); else { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { RealPixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. */ for (count=1; (x+count) < (ssize_t) image->columns; count++) if (IsSameColor(image,q,q+count) == MagickFalse) break; AssociateAlphaPixel(&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)* (QuantumRange+1.0)+1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(indexes+x+i,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } q++; } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AssignImageColors) #endif proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image); if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) { Quantum intensity; register PixelPacket *restrict q; register ssize_t i; /* Monochrome image. */ q=image->colormap; for (i=0; i < (ssize_t) image->colors; i++) { intensity=(Quantum) (GetPixelIntensity(image,q) < ((MagickRealType) QuantumRange/2.0) ? 
          0 : QuantumRange);
        SetPixelRed(q,intensity);
        SetPixelGreen(q,intensity);
        SetPixelBlue(q,intensity);
        q++;
      }
    }
  (void) SyncImage(image);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l a s s i f y   I m a g e   C o l o r s                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClassifyImageColors() begins by initializing a color description tree
%  of sufficient depth to represent each possible input color in a leaf.
%  However, it is impractical to generate a fully-formed color description
%  tree in the storage_class phase for realistic values of Cmax.  If color
%  components in the input image are quantized to k-bit precision, so that
%  Cmax = 2^k-1, the tree would need k levels below the root node to allow
%  representing each possible input color in a leaf.  This becomes
%  prohibitive because the tree's total number of nodes is
%  1 + sum(i=1, k, 8^i).
%
%  A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255.
%  Therefore, to avoid building a fully populated tree, QUANTIZE: (1)
%  Initializes data structures for nodes only as they are needed;  (2)
%  Chooses a maximum depth for the tree as a function of the desired
%  number of colors in the output image (currently log2(colormap size)).
%
%  For each pixel in the input image, storage_class scans downward from
%  the root of the color description tree.  At each level of the tree it
%  identifies the single node which represents a cube in RGB space
%  containing the pixel's color.  It updates the following data for each
%  such node:
%
%    n1: Number of pixels whose color is contained in the RGB cube which
%    this node represents;
%
%    n2: Number of pixels whose color is not represented in a node at
%    lower depth in the tree;  initially, n2 = 0 for all nodes except
%    leaves of the tree.
%
%    Sr, Sg, Sb: Sums of the red, green, and blue component values for
%    all pixels not classified at a lower depth.  The combination of
%    these sums and n2 will ultimately characterize the mean color of a
%    set of pixels represented by this node.
%
%    E: the distance squared in RGB space between each pixel contained
%    within a node and the node's center.  This represents the
%    quantization error for a node.
%
%  The format of the ClassifyImageColors() method is:
%
%      MagickBooleanType ClassifyImageColors(CubeInfo *cube_info,
%        const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
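%
%  As a rough sketch only (condensed from the classification loop below,
%  `count' being the length of a run of identical pixels): every node on a
%  pixel's root-to-leaf path accumulates the error term E, while n2
%  (number_unique) and the color sums are accumulated in the deepest node
%  reached:
%
%      node_info->quantize_error+=sqrt((double) (count*error.red*error.red+
%        count*error.green*error.green+count*error.blue*error.blue));
%
%      node_info->number_unique+=count;
%      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
%      node_info->total_color.green+=count*QuantumScale*
%        ClampPixel(pixel.green);
%      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);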
% */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->matte; if (cube_info->quantize_info->colorspace == TransparentColorspace) associate_alpha=MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; MagickBooleanType proceed; MagickRealType bisect; NodeInfo *node_info; RealPixelPacket error, mid, midpoint, pixel; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace); midpoint.red=(MagickRealType) QuantumRange/2.0; midpoint.green=(MagickRealType) QuantumRange/2.0; midpoint.blue=(MagickRealType) QuantumRange/2.0; midpoint.opacity=(MagickRealType) QuantumRange/2.0; error.opacity=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(image,cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) if (IsSameColor(image,p,p+count) == MagickFalse) break; AssociateAlphaPixel(cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((MagickRealType) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.opacity+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. 
*/ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.opacity=QuantumScale*(pixel.opacity-mid.opacity); node_info->quantize_error+=sqrt((double) (count*error.red*error.red+ count*error.green*error.green+count*error.blue*error.blue+ count*error.opacity*error.opacity)); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. */ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.opacity+=count*QuantumScale* ClampPixel(pixel.opacity); p+=count; } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(image,cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const PixelPacket *restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(image,cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) if (IsSameColor(image,p,p+count) == MagickFalse) break; AssociateAlphaPixel(cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((MagickRealType) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.opacity+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.opacity=QuantumScale*(pixel.opacity-mid.opacity); node_info->quantize_error+=sqrt((double) (count*error.red*error.red+ count*error.green*error.green+count*error.blue*error.blue+ count*error.opacity*error.opacity)); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
      */
      node_info->number_unique+=count;
      node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red);
      node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green);
      node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue);
      if (cube_info->associate_alpha != MagickFalse)
        node_info->total_color.opacity+=count*QuantumScale*ClampPixel(
          pixel.opacity);
      p+=count;
    }
    proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((cube_info->quantize_info->colorspace != UndefinedColorspace) &&
      (cube_info->quantize_info->colorspace != CMYKColorspace))
    (void) TransformImageColorspace((Image *) image,sRGBColorspace);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e   Q u a n t i z e   I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneQuantizeInfo() makes a duplicate of the given quantize info
%  structure, or if quantize info is NULL, a new one.
%
%  The format of the CloneQuantizeInfo method is:
%
%      QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o clone_info: Method CloneQuantizeInfo returns a duplicate of the given
%      quantize info, or if quantize info is NULL, a new one.
%
%    o quantize_info: a structure of type QuantizeInfo.
%
*/
MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info)
{
  QuantizeInfo
    *clone_info;

  clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info));
  if (clone_info == (QuantizeInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetQuantizeInfo(clone_info);
  if (quantize_info == (QuantizeInfo *) NULL)
    return(clone_info);
  clone_info->number_colors=quantize_info->number_colors;
  clone_info->tree_depth=quantize_info->tree_depth;
  clone_info->dither=quantize_info->dither;
  clone_info->dither_method=quantize_info->dither_method;
  clone_info->colorspace=quantize_info->colorspace;
  clone_info->measure_error=quantize_info->measure_error;
  return(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C l o s e s t   C o l o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClosestColor() traverses the color cube tree at a particular node and
%  determines which colormap entry best represents the input color.
%
%  The format of the ClosestColor method is:
%
%      void ClosestColor(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to
%      a node in the color cube tree that is to be searched.
%
*/
static void ClosestColor(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      ClosestColor(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      MagickRealType
        pixel;

      register MagickRealType
        alpha,
        beta,
        distance;

      register PixelPacket
        *restrict p;

      register RealPixelPacket
        *restrict q;

      /*
        Determine if this color is "closest".
      */
      p=image->colormap+node_info->color_number;
      q=(&cube_info->target);
      alpha=1.0;
      beta=1.0;
      if (cube_info->associate_alpha != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*GetPixelAlpha(p));
          beta=(MagickRealType) (QuantumScale*GetPixelAlpha(q));
        }
      pixel=alpha*GetPixelRed(p)-beta*GetPixelRed(q);
      distance=pixel*pixel;
      if (distance <= cube_info->distance)
        {
          pixel=alpha*GetPixelGreen(p)-beta*GetPixelGreen(q);
          distance+=pixel*pixel;
          if (distance <= cube_info->distance)
            {
              pixel=alpha*GetPixelBlue(p)-beta*GetPixelBlue(q);
              distance+=pixel*pixel;
              if (distance <= cube_info->distance)
                {
                  pixel=alpha-beta;
                  distance+=pixel*pixel;
                  if (distance <= cube_info->distance)
                    {
                      cube_info->distance=distance;
                      cube_info->color_number=node_info->color_number;
                    }
                }
            }
        }
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o m p r e s s   I m a g e   C o l o r m a p                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CompressImageColormap() compresses an image colormap by removing any
%  duplicate or unused color entries.
%
%  The format of the CompressImageColormap method is:
%
%      MagickBooleanType CompressImageColormap(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType CompressImageColormap(Image *image)
{
  QuantizeInfo
    quantize_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsPaletteImage(image,&image->exception) == MagickFalse)
    return(MagickFalse);
  GetQuantizeInfo(&quantize_info);
  quantize_info.number_colors=image->colors;
  quantize_info.tree_depth=MaxTreeDepth;
  return(QuantizeImage(&quantize_info,image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e f i n e   I m a g e   C o l o r m a p                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DefineImageColormap() traverses the color cube tree and notes each colormap
%  entry.  A colormap entry is any node in the color cube tree where the
%  number of unique colors is not zero.  DefineImageColormap() returns the
%  number of colors in the image colormap.
%
%  The format of the DefineImageColormap method is:
%
%      size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
%        NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: the address of a structure of type NodeInfo which points to
%      a node in the color cube tree that is to be traversed.
%
*/
static size_t DefineImageColormap(Image *image,CubeInfo *cube_info,
  NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      (void) DefineImageColormap(image,cube_info,node_info->child[i]);
  if (node_info->number_unique != 0)
    {
      register MagickRealType
        alpha;

      register PixelPacket
        *restrict q;

      /*
        Colormap entry is defined by the mean color in this cube.
      */
      q=image->colormap+image->colors;
      alpha=(MagickRealType) ((MagickOffsetType) node_info->number_unique);
      alpha=PerceptibleReciprocal(alpha);
      if (cube_info->associate_alpha == MagickFalse)
        {
          SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.red)));
          SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.green)));
          SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
            QuantumRange*node_info->total_color.blue)));
          SetPixelOpacity(q,OpaqueOpacity);
        }
      else
        {
          MagickRealType
            opacity;

          opacity=(MagickRealType) (alpha*QuantumRange*
            node_info->total_color.opacity);
          SetPixelOpacity(q,ClampToQuantum(opacity));
          if (q->opacity == OpaqueOpacity)
            {
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                QuantumRange*node_info->total_color.blue)));
            }
          else
            {
              double
                gamma;

              gamma=(double) (QuantumScale*(QuantumRange-(double) q->opacity));
              gamma=PerceptibleReciprocal(gamma);
              SetPixelRed(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.red)));
              SetPixelGreen(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.green)));
              SetPixelBlue(q,ClampToQuantum((MagickRealType) (alpha*
                gamma*QuantumRange*node_info->total_color.blue)));
              if (node_info->number_unique > cube_info->transparent_pixels)
                {
                  cube_info->transparent_pixels=node_info->number_unique;
                  cube_info->transparent_index=(ssize_t) image->colors;
                }
            }
        }
      node_info->color_number=image->colors++;
    }
  return(image->colors);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y   C u b e   I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyCubeInfo() deallocates memory associated with the color cube.
%
%  The format of the DestroyCubeInfo method is:
%
%      DestroyCubeInfo(CubeInfo *cube_info)
%
%  A description of each parameter follows:
%
%    o cube_info: the address of a structure of type CubeInfo.
%
*/
static void DestroyCubeInfo(CubeInfo *cube_info)
{
  register Nodes
    *nodes;

  /*
    Release color cube tree storage.
  */
  do
  {
    nodes=cube_info->node_queue->next;
    cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory(
      cube_info->node_queue->nodes);
    cube_info->node_queue=(Nodes *) RelinquishMagickMemory(
      cube_info->node_queue);
    cube_info->node_queue=nodes;
  } while (cube_info->node_queue != (Nodes *) NULL);
  if (cube_info->cache != (ssize_t *) NULL)
    cube_info->cache=(ssize_t *) RelinquishMagickMemory(cube_info->cache);
  cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info);
  cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y   Q u a n t i z e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyQuantizeInfo() deallocates memory associated with a QuantizeInfo
%  structure.
%
%  The format of the DestroyQuantizeInfo method is:
%
%      QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
%
%  A description of each parameter follows:
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
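%
%  A minimal lifecycle sketch (hypothetical caller; CloneQuantizeInfo() with
%  a NULL argument returns a freshly initialized structure, and
%  DestroyQuantizeInfo() returns the relinquished pointer):
%
%      QuantizeInfo
%        *quantize_info;
%
%      quantize_info=CloneQuantizeInfo((QuantizeInfo *) NULL);
%      quantize_info->number_colors=64;
%      ...
%      quantize_info=DestroyQuantizeInfo(quantize_info);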
%
*/
MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(quantize_info != (QuantizeInfo *) NULL);
  assert(quantize_info->signature == MagickSignature);
  quantize_info->signature=(~MagickSignature);
  quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info);
  return(quantize_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D i t h e r   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DitherImage() distributes the difference between an original image and
%  the corresponding color-reduced image to neighboring pixels using
%  serpentine-scan Floyd-Steinberg error diffusion.  DitherImage returns
%  MagickTrue if the image is dithered, otherwise MagickFalse.
%
%  The format of the DitherImage method is:
%
%      MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
*/
static RealPixelPacket **DestroyPixelThreadSet(RealPixelPacket **pixels)
{
  register ssize_t
    i;

  assert(pixels != (RealPixelPacket **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (RealPixelPacket *) NULL)
      pixels[i]=(RealPixelPacket *) RelinquishMagickMemory(pixels[i]);
  pixels=(RealPixelPacket **) RelinquishMagickMemory(pixels);
  return(pixels);
}

static RealPixelPacket **AcquirePixelThreadSet(const size_t count)
{
  RealPixelPacket
    **pixels;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(RealPixelPacket **) AcquireQuantumMemory(number_threads,
    sizeof(*pixels));
  if (pixels == (RealPixelPacket **) NULL)
    return((RealPixelPacket **) NULL);
  (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=(RealPixelPacket *) AcquireQuantumMemory(count,
      2*sizeof(**pixels));
    if (pixels[i] == (RealPixelPacket *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

static inline ssize_t CacheOffset(CubeInfo *cube_info,
  const RealPixelPacket *pixel)
{
#define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift)))
#define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift)))
#define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift)))
#define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift)))

  ssize_t
    offset;

  offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) |
    GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) |
    BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue))));
  if (cube_info->associate_alpha != MagickFalse)
    offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->opacity)));
  return(offset);
}

static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info)
{
#define DitherImageTag  "Dither/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  RealPixelPacket
    **pixels;

  ssize_t
    y;

  /*
    Distribute quantization error using Floyd-Steinberg.
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (RealPixelPacket **) NULL) return(MagickFalse); exception=(&image->exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; RealPixelPacket *current, *previous; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { RealPixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(&cube,q+u,&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=7*current[u-v].opacity/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=previous[u+v].opacity/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=5*previous[u].opacity/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.opacity+=3*previous[u-v].opacity/16; } } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(MagickRealType) (4.0*(QuantumRange+1.0)*(QuantumRange+ 1.0)+1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(indexes+u,index); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRgb(q+u,image->colormap+index); if (cube.associate_alpha != MagickFalse) SetPixelOpacity(q+u,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixel(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].opacity=pixel.opacity-color.opacity; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FloydSteinbergDither) #endif proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,EastGravity); Riemersma(image,image_view,cube_info,level-1,NorthGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,EastGravity); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity); Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,WestGravity); 
Riemersma(image,image_view,cube_info,level-1,SouthGravity); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity); Riemersma(image,image_view,cube_info,level-1,WestGravity); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction) { #define DitherImageTag "Dither/Image" MagickBooleanType proceed; RealPixelPacket color, pixel; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { ExceptionInfo *exception; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t i; /* Distribute error. */ exception=(&image->exception); q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (PixelPacket *) NULL) return(MagickFalse); indexes=GetCacheViewAuthenticIndexQueue(image_view); AssociateAlphaPixel(cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.opacity+=p->weights[i]*p->error[i].opacity; } pixel.red=(MagickRealType) ClampPixel(pixel.red); pixel.green=(MagickRealType) ClampPixel(pixel.green); pixel.blue=(MagickRealType) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.opacity=(MagickRealType) ClampPixel(pixel.opacity); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } node_info=node_info->parent; /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(MagickRealType) (4.0*(QuantumRange+1.0)*((MagickRealType) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) (1*p->cache[i]); if (image->storage_class == PseudoClass) *indexes=(IndexPacket) index; if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRgb(q,image->colormap+index); if (cube_info->associate_alpha != MagickFalse) SetPixelOpacity(q,image->colormap[index].opacity); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. 
      */
      (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)*
        sizeof(p->error[0]));
      AssociateAlphaPixel(cube_info,image->colormap+index,&color);
      p->error[ErrorQueueLength-1].red=pixel.red-color.red;
      p->error[ErrorQueueLength-1].green=pixel.green-color.green;
      p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue;
      if (cube_info->associate_alpha != MagickFalse)
        p->error[ErrorQueueLength-1].opacity=pixel.opacity-color.opacity;
      proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span);
      if (proceed == MagickFalse)
        return(MagickFalse);
      p->offset++;
    }
  switch (direction)
  {
    case WestGravity: p->x--; break;
    case EastGravity: p->x++; break;
    case NorthGravity: p->y--; break;
    case SouthGravity: p->y++; break;
  }
  return(MagickTrue);
}

static inline ssize_t MagickMax(const ssize_t x,const ssize_t y)
{
  if (x > y)
    return(x);
  return(y);
}

static inline ssize_t MagickMin(const ssize_t x,const ssize_t y)
{
  if (x < y)
    return(x);
  return(y);
}

static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    depth;

  if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod)
    return(FloydSteinbergDither(image,cube_info));
  /*
    Distribute quantization error along a Hilbert curve.
  */
  (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength*
    sizeof(*cube_info->error));
  cube_info->x=0;
  cube_info->y=0;
  i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows);
  for (depth=1; i != 0; depth++)
    i>>=1;
  if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t)
      image->rows))
    depth++;
  cube_info->offset=0;
  cube_info->span=(MagickSizeType) image->columns*image->rows;
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  if (depth > 1)
    Riemersma(image,image_view,cube_info,depth-1,NorthGravity);
  status=RiemersmaDither(image,image_view,cube_info,ForgetGravity);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   C u b e   I n f o                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCubeInfo() initializes the Cube data structure.
%
%  The format of the GetCubeInfo method is:
%
%      CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
%        const size_t depth,const size_t maximum_colors)
%
%  A description of each parameter follows.
%
%    o quantize_info: Specifies a pointer to a QuantizeInfo structure.
%
%    o depth: Normally, this integer value is zero or one.  A zero or one
%      tells Quantize to choose an optimal tree depth of Log4(number_colors).
%      A tree of this depth generally allows the best representation of the
%      reference image with the least amount of memory and the fastest
%      computational speed.  In some cases, such as an image with low color
%      dispersion (a small number of colors), a value other than
%      Log4(number_colors) is required.  To expand the color tree completely,
%      use a value of 8.
%
%    o maximum_colors: maximum colors.
%
*/
static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info,
  const size_t depth,const size_t maximum_colors)
{
  CubeInfo
    *cube_info;

  MagickRealType
    sum,
    weight;

  register ssize_t
    i;

  size_t
    length;

  /*
    Initialize tree to describe the color cube.
*/ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node. */ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither == MagickFalse) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->cache=(ssize_t *) AcquireQuantumMemory(length, sizeof(*cube_info->cache)); if (cube_info->cache == (ssize_t *) NULL) return((CubeInfo *) NULL); /* Initialize color cache. */ for (i=0; i < (ssize_t) length; i++) cube_info->cache[i]=(-1); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to a queue of nodes. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the storage_class the node resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. */ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) ResetMagickMemory(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. 
%  The error is computed by summing over all pixels in an image the distance
%  squared in RGB space between each reference pixel value and its quantized
%  value.  These values are computed:
%
%    o mean_error_per_pixel:  This value is the mean error for any single
%      pixel in the image.
%
%    o normalized_mean_square_error:  This value is the normalized mean
%      quantization error for any single pixel in the image.  This distance
%      measure is normalized to a range between 0 and 1.  It is independent
%      of the range of red, green, and blue values in the image.
%
%    o normalized_maximum_square_error:  This value is the normalized
%      maximum quantization error for any single pixel in the image.  This
%      distance measure is normalized to a range between 0 and 1.  It is
%      independent of the range of red, green, and blue values in the image.
%
%  The format of the GetImageQuantizeError method is:
%
%      MagickBooleanType GetImageQuantizeError(Image *image)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType GetImageQuantizeError(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    *indexes;

  MagickRealType
    alpha,
    area,
    beta,
    distance,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  size_t
    index;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->total_colors=GetNumberColors(image,(FILE *) NULL,&image->exception);
  (void) ResetMagickMemory(&image->error,0,sizeof(image->error));
  if (image->storage_class == DirectClass)
    return(MagickTrue);
  alpha=1.0;
  beta=1.0;
  area=3.0*image->columns*image->rows;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=1UL*GetPixelIndex(indexes+x);
      if (image->matte != MagickFalse)
        {
          alpha=(MagickRealType) (QuantumScale*(GetPixelAlpha(p)));
          beta=(MagickRealType) (QuantumScale*(QuantumRange-
            image->colormap[index].opacity));
        }
      distance=fabs(alpha*GetPixelRed(p)-beta*image->colormap[index].red);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelGreen(p)-beta*image->colormap[index].green);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      distance=fabs(alpha*GetPixelBlue(p)-beta*image->colormap[index].blue);
      mean_error_per_pixel+=distance;
      mean_error+=distance*distance;
      if (distance > maximum_error)
        maximum_error=distance;
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area;
  image->error.normalized_mean_error=(double) QuantumScale*QuantumScale*
    mean_error/area;
  image->error.normalized_maximum_error=(double) QuantumScale*maximum_error;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   Q u a n t i z e   I n f o                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetQuantizeInfo() initializes the QuantizeInfo structure.
% % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither=MagickTrue; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const MagickBooleanType dither) % MagickBooleanType PosterizeImageChannel(Image *image, % const ChannelType channel,const size_t levels, % const MagickBooleanType dither) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither: Set this integer value to something other than zero to dither % the mapped image. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const MagickBooleanType dither) { MagickBooleanType status; status=PosterizeImageChannel(image,DefaultChannels,levels,dither); return(status); } MagickExport MagickBooleanType PosterizeImageChannel(Image *image, const ChannelType channel,const size_t levels,const MagickBooleanType dither) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \ QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((channel & RedChannel) != 0) image->colormap[i].red=PosterizePixel(image->colormap[i].red); if ((channel & GreenChannel) != 0) image->colormap[i].green=PosterizePixel(image->colormap[i].green); if ((channel & BlueChannel) != 0) image->colormap[i].blue=PosterizePixel(image->colormap[i].blue); if ((channel & OpacityChannel) != 0) image->colormap[i].opacity=PosterizePixel(image->colormap[i].opacity); } /* Posterize image. 
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,PosterizePixel(GetPixelRed(q)));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,PosterizePixel(GetPixelGreen(q)));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,PosterizePixel(GetPixelBlue(q)));
      if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue))
        SetPixelOpacity(q,PosterizePixel(GetPixelOpacity(q)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(indexes+x,PosterizePixel(GetPixelIndex(indexes+x)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PosterizeImageChannel)
#endif
        proceed=SetImageProgress(image,PosterizeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels*
    levels,MaxColormapSize+1);
  quantize_info->dither=dither;
  quantize_info->tree_depth=MaxTreeDepth;
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P r u n e   C h i l d                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PruneChild() deletes the given node and merges its statistics into its
%  parent.
%
%  The format of the PruneChild method is:
%
%      PruneChild(const Image *image,CubeInfo *cube_info,
%        const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void PruneChild(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  NodeInfo
    *parent;

  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      PruneChild(image,cube_info,node_info->child[i]);
  /*
    Merge color statistics into parent.
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.opacity+=node_info->total_color.opacity; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(image,cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(image,cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(image,cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(image,cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. 
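%
%  A minimal usage sketch (hypothetical caller; `image' is an existing
%  Image, and a stack-allocated QuantizeInfo initialized with
%  GetQuantizeInfo() needs no destructor):
%
%      QuantizeInfo
%        quantize_info;
%
%      GetQuantizeInfo(&quantize_info);
%      quantize_info.number_colors=16;
%      status=QuantizeImage(&quantize_info,image);
%
%  A MagickFalse return indicates failure; error details are normally
%  recorded in image->exception.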
% */ static MagickBooleanType DirectToColormapImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t number_colors; ssize_t y; status=MagickTrue; number_colors=(size_t) (image->columns*image->rows); if (AcquireImageColormap(image,number_colors) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->colors != number_colors) return(MagickFalse); i=0; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType proceed; register IndexPacket *restrict indexes; register PixelPacket *restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (const PixelPacket *) NULL) break; indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { image->colormap[i].red=GetPixelRed(q); image->colormap[i].green=GetPixelGreen(q); image->colormap[i].blue=GetPixelBlue(q); image->colormap[i].opacity=GetPixelOpacity(q); SetPixelIndex(indexes+x,i); i++; q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) break; proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->matte == MagickFalse) { if ((image->columns*image->rows) <= maximum_colors) (void) DirectToColormapImage(image,&image->exception); if (IsGrayImage(image,&image->exception) != MagickFalse) (void) SetGrayscaleImage(image); } if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) return(MagickTrue); depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither != MagickFalse) && (depth > 2)) depth--; if ((image->matte != MagickFalse) && (depth > 5)) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,&image->exception); if (status != MagickFalse) { /* Reduce the number of colors in the image. */ ReduceImageColors(image,cube_info); status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. 
The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickSignature); assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither != MagickFalse) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(&images->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,&image->exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. 
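%
%  In sketch form (condensed from the code below), a visited node is either
%  merged into its parent or survives and updates the bookkeeping for the
%  next pruning pass:
%
%      if (node_info->quantize_error <= cube_info->pruning_threshold)
%        PruneChild(image,cube_info,node_info);
%      else
%        {
%          if (node_info->number_unique > 0)
%            cube_info->colors++;
%          if (node_info->quantize_error < cube_info->next_threshold)
%            cube_info->next_threshold=node_info->quantize_error;
%        }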
%
%  The format of the Reduce method is:
%
%      Reduce(const Image *image,CubeInfo *cube_info,const NodeInfo *node_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
%
%    o node_info: pointer to node in color cube tree that is to be pruned.
%
*/
static void Reduce(const Image *image,CubeInfo *cube_info,
  const NodeInfo *node_info)
{
  register ssize_t
    i;

  size_t
    number_children;

  /*
    Traverse any children.
  */
  number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL;
  for (i=0; i < (ssize_t) number_children; i++)
    if (node_info->child[i] != (NodeInfo *) NULL)
      Reduce(image,cube_info,node_info->child[i]);
  if (node_info->quantize_error <= cube_info->pruning_threshold)
    PruneChild(image,cube_info,node_info);
  else
    {
      /*
        Find minimum pruning threshold.
      */
      if (node_info->number_unique > 0)
        cube_info->colors++;
      if (node_info->quantize_error < cube_info->next_threshold)
        cube_info->next_threshold=node_info->quantize_error;
    }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   R e d u c e I m a g e C o l o r s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReduceImageColors() repeatedly prunes the tree until the number of nodes
%  with n2 > 0 is less than or equal to the maximum number of colors allowed
%  in the output image.  On any given iteration over the tree, it selects
%  those nodes whose E value is minimal for pruning and merges their
%  color statistics upward.  It uses a pruning threshold, Ep, to govern
%  node selection as follows:
%
%    Ep = 0
%    while number of nodes with (n2 > 0) > required maximum number of colors
%      prune all nodes such that E <= Ep
%      Set Ep to minimum E in remaining nodes
%
%  This has the effect of minimizing any quantization error when merging
%  two nodes together.
%
%  When a node to be pruned has offspring, the pruning procedure invokes
%  itself recursively in order to prune the tree from the leaves upward.
%  n2, Sr, Sg, and Sb in a node being pruned are always added to the
%  corresponding data in that node's parent.  This retains the pruned
%  node's color characteristics for later averaging.
%
%  For each node, n2 pixels exist for which that node represents the
%  smallest volume in RGB space containing those pixels' colors.  When n2
%  > 0 the node will uniquely define a color in the output image.  At the
%  beginning of reduction, n2 = 0 for all nodes except at the leaves of
%  the tree which represent colors present in the input image.
%
%  The other pixel count, n1, indicates the total number of colors
%  within the cubic volume which the node represents.  This includes n1 -
%  n2 pixels whose colors should be defined by nodes at a lower level in
%  the tree.
%
%  The format of the ReduceImageColors method is:
%
%      ReduceImageColors(const Image *image,CubeInfo *cube_info)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cube_info: A pointer to the Cube structure.
% */ static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(image,cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest color from % a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images); return(status); } /* Classify image colors from the reference image. 
*/ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,&image->exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image) % % A description of each parameter follows: % % o image: The image. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { PixelPacket *color_1, *color_2; int intensity; color_1=(PixelPacket *) x; color_2=(PixelPacket *) y; intensity=PixelPacketIntensity(color_1)-(int) PixelPacketIntensity(color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; PixelPacket *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxMap+1, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { ExceptionInfo *exception; for (i=0; i <= (ssize_t) MaxMap; i++) colormap_index[i]=(-1); if (AcquireImageColormap(image,MaxMap+1) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=GetPixelRed(q); image->colormap[image->colors].green=GetPixelGreen(q); image->colormap[image->colors].blue=GetPixelBlue(q); image->colors++; } } 
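        /*
          The test above is repeated inside the critical section (double-
          checked locking) so that under OpenMP only one thread registers a
          new gray level, while intensities already in the colormap keep to
          the lock-free fast path.
        */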
SetPixelIndex(indexes+x,colormap_index[intensity]); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].opacity=(unsigned short) i; qsort((void *) image->colormap,image->colors,sizeof(PixelPacket), IntensityCompare); colormap=(PixelPacket *) AcquireQuantumMemory(image->colors, sizeof(*colormap)); if (colormap == (PixelPacket *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsSameColor(image,&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].opacity]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register const PixelPacket *restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) SetPixelIndex(indexes+x,colormap_index[ScaleQuantumToMap(GetPixelIndex( indexes+x))]); if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (IsMonochromeImage(image,&image->exception) != MagickFalse) image->type=BilevelType; return(status); }
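/*
  A minimal caller-side sketch (illustrative only, not part of quantize.c):
  reducing an image to at most 256 colors with QuantizeImage().  It relies on
  the public AcquireQuantizeInfo()/DestroyQuantizeInfo() interface; the helper
  name QuantizeTo256 is hypothetical.
*/
#if 0
static MagickBooleanType QuantizeTo256(Image *image)
{
  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
  quantize_info->number_colors=256;  /* clamped to MaxColormapSize above */
  quantize_info->dither=MagickTrue;  /* enable error-diffusion dithering */
  status=QuantizeImage(quantize_info,image);
  quantize_info=DestroyQuantizeInfo(quantize_info);
  return(status);
}
#endif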
GB_binop__second_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__second_uint32 // A.*B function (eWiseMult): GB_AemultB__second_uint32 // A*D function (colscale): GB_AxD__second_uint32 // D*A function (rowscale): GB_DxB__second_uint32 // C+=B function (dense accum): GB_Cdense_accumB__second_uint32 // C+=b function (dense accum): GB_Cdense_accumb__second_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__second_uint32 // C=scalar+B (none) // C=scalar+B' (none) // C=A+scalar GB_bind2nd__second_uint32 // C=A'+scalar GB_bind2nd_tran__second_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = bij #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = y ; // op is second #define GB_OP_IS_SECOND \ 1 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT32 || GxB_NO_SECOND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
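// SECOND is not in that list, so the fused dense C+=A+B kernel is stubbed out
// as (none) and this whole block is compiled away.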
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__second_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__second_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__second_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__second_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__second_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__second_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
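// A note on the semantics above: eWiseAdd with SECOND produces the set union
// of the patterns of A and B.  Where both entries are present cij = bij; an
// entry present in only one of the two inputs is copied through to C
// unchanged.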
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__second_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__second_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = aij ; \ } GrB_Info (none) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB_bind2nd_tran__second_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
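//------------------------------------------------------------------------------
// caller-side sketch
//------------------------------------------------------------------------------

// A minimal usage sketch (an assumption, not part of this generated file):
// the kernels above specialize the built-in GrB_SECOND_UINT32 operator, e.g.

#if 0
#include "GraphBLAS.h"

static GrB_Info second_demo (GrB_Matrix C, GrB_Matrix A, GrB_Matrix B)
{
    // C = A .* B with SECOND: the pattern is the intersection of A and B,
    // and every surviving value is taken from B (cij = bij)
    return (GrB_eWiseMult (C, NULL, NULL, GrB_SECOND_UINT32, A, B, NULL)) ;
}
#endif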
convolutiondepthwise_3x3_pack8_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

static void convdw3x3s1_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        int* outptr0 = out.row<int>(0);
        int* outptr1 = out.row<int>(1);

        const Mat img0 = bottom_blob.channel(g);

        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);
        const signed char* r3 = img0.row<const signed char>(3);

        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);
                int8x16_t _r3031 = vld1q_s8(r3);
                int8x16_t _r3233 = vld1q_s8(r3 + 16);

                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                int16x8_t _s20 = vmull_s8(vget_low_s8(_r1011), _k00);
                int16x8_t _s21 = vmull_s8(vget_high_s8(_r1011), _k01);
                int16x8_t _s22 = vmull_s8(vget_low_s8(_r1213), _k02);
                int16x8_t _s23 = vmull_s8(vget_low_s8(_r2021), _k10);
                int16x8_t _s30 = vmull_s8(vget_high_s8(_r1011), _k00);
                int16x8_t _s31 = vmull_s8(vget_low_s8(_r1213), _k01);
                int16x8_t _s32 = vmull_s8(vget_high_s8(_r1213), _k02);
                int16x8_t _s33 = vmull_s8(vget_high_s8(_r2021), _k10);

                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);
                _s20 = vmlal_s8(_s20, vget_high_s8(_r2021), _k11);
                _s21 = vmlal_s8(_s21, vget_low_s8(_r2223), _k12);
                _s22 = vmlal_s8(_s22, vget_low_s8(_r3031), _k20);
                _s23 = vmlal_s8(_s23, vget_high_s8(_r3031), _k21);
                _s30 = vmlal_s8(_s30, vget_low_s8(_r2223), _k11);
                _s31 = vmlal_s8(_s31, vget_high_s8(_r2223), _k12);
                _s32 = vmlal_s8(_s32, vget_high_s8(_r3031), _k20);
                _s33 = vmlal_s8(_s33, vget_low_s8(_r3233), _k21);

                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);
                int16x8_t _s28 = vmull_s8(vget_low_s8(_r3233), _k22);
                int16x8_t _s38 = vmull_s8(vget_high_s8(_r3233), _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                int32x4_t _sum20 = vaddl_s16(vget_low_s16(_s20), vget_low_s16(_s21));
                int32x4_t _sum21 = vaddl_s16(vget_high_s16(_s20), vget_high_s16(_s21));
                int32x4_t _sum22 = vaddl_s16(vget_low_s16(_s22), vget_low_s16(_s23));
                int32x4_t _sum23 = vaddl_s16(vget_high_s16(_s22), vget_high_s16(_s23));
                int32x4_t _sum30 = vaddl_s16(vget_low_s16(_s30), vget_low_s16(_s31));
                int32x4_t _sum31 = vaddl_s16(vget_high_s16(_s30), vget_high_s16(_s31));
                int32x4_t _sum32 = vaddl_s16(vget_low_s16(_s32), vget_low_s16(_s33));
                int32x4_t _sum33 = vaddl_s16(vget_high_s16(_s32), vget_high_s16(_s33));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum20 = vaddw_s16(_sum20, vget_low_s16(_s28));
                _sum21 = vaddw_s16(_sum21, vget_high_s16(_s28));
                _sum30 = vaddw_s16(_sum30, vget_low_s16(_s38));
                _sum31 = vaddw_s16(_sum31, vget_high_s16(_s38));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);
                _sum20 = vaddq_s32(_sum20, _sum22);
                _sum21 = vaddq_s32(_sum21, _sum23);
                _sum30 = vaddq_s32(_sum30, _sum32);
                _sum31 = vaddq_s32(_sum31, _sum33);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);
                vst1q_s32(outptr1, _sum20);
                vst1q_s32(outptr1 + 4, _sum21);
                vst1q_s32(outptr1 + 8, _sum30);
                vst1q_s32(outptr1 + 12, _sum31);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                outptr0 += 16;
                outptr1 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r30 = vld1_s8(r3);
                int8x8_t _r31 = vld1_s8(r3 + 8);
                int8x8_t _r32 = vld1_s8(r3 + 16);

                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r10, _k00);
                int16x8_t _s11 = vmull_s8(_r11, _k01);
                int16x8_t _s12 = vmull_s8(_r12, _k02);
                int16x8_t _s13 = vmull_s8(_r20, _k10);

                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r21, _k11);
                _s11 = vmlal_s8(_s11, _r22, _k12);
                _s12 = vmlal_s8(_s12, _r30, _k20);
                _s13 = vmlal_s8(_s13, _r31, _k21);

                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r32, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr1, _sum10);
                vst1q_s32(outptr1 + 4, _sum11);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr0 += 8;
                outptr1 += 8;
            }

            r0 += 2 * 8 + w * 8;
            r1 += 2 * 8 + w * 8;
            r2 += 2 * 8 + w * 8;
            r3 += 2 * 8 + w * 8;

            outptr0 += outw * 8;
            outptr1 += outw * 8;
        }
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);

                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);

                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);

                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 16;
r1 += 16; r2 += 16; outptr0 += 16; } for (; j < outw; j++) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r10 = vld1_s8(r1); int8x8_t _r11 = vld1_s8(r1 + 8); int8x8_t _r12 = vld1_s8(r1 + 16); int8x8_t _r20 = vld1_s8(r2); int8x8_t _r21 = vld1_s8(r2 + 8); int8x8_t _r22 = vld1_s8(r2 + 16); int16x8_t _s0 = vmull_s8(_r00, _k00); int16x8_t _s1 = vmull_s8(_r01, _k01); int16x8_t _s2 = vmull_s8(_r02, _k02); int16x8_t _s3 = vmull_s8(_r10, _k10); _s0 = vmlal_s8(_s0, _r11, _k11); _s1 = vmlal_s8(_s1, _r12, _k12); _s2 = vmlal_s8(_s2, _r20, _k20); _s3 = vmlal_s8(_s3, _r21, _k21); int16x8_t _s4 = vmull_s8(_r22, _k22); int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1)); int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1)); int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3)); int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3)); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4)); _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4)); _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); vst1q_s32(outptr0, _sum0); vst1q_s32(outptr0 + 4, _sum1); r0 += 8; r1 += 8; r2 += 8; outptr0 += 8; } r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; } } } static void convdw3x3s2_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 8; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const signed char* k0 = kernel.row<const signed char>(g); int* outptr0 = out; const Mat img0 = bottom_blob.channel(g); const signed char* r0 = img0.row<const signed char>(0); const signed char* r1 = img0.row<const signed char>(1); const signed char* r2 = img0.row<const signed char>(2); int8x8_t _k00 = vld1_s8(k0); int8x8_t _k01 = vld1_s8(k0 + 8); int8x8_t _k02 = vld1_s8(k0 + 16); int8x8_t _k10 = vld1_s8(k0 + 24); int8x8_t _k11 = vld1_s8(k0 + 32); int8x8_t _k12 = vld1_s8(k0 + 40); int8x8_t _k20 = vld1_s8(k0 + 48); int8x8_t _k21 = vld1_s8(k0 + 56); int8x8_t _k22 = vld1_s8(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r03 = vld1_s8(r0 + 24); int8x8_t _r04 = vld1_s8(r0 + 32); int8x8_t _r10 = vld1_s8(r1); int8x8_t _r11 = vld1_s8(r1 + 8); int8x8_t _r12 = vld1_s8(r1 + 16); int8x8_t _r13 = vld1_s8(r1 + 24); int8x8_t _r14 = vld1_s8(r1 + 32); int8x8_t _r20 = vld1_s8(r2); int8x8_t _r21 = vld1_s8(r2 + 8); int8x8_t _r22 = vld1_s8(r2 + 16); int8x8_t _r23 = vld1_s8(r2 + 24); int8x8_t _r24 = vld1_s8(r2 + 32); int16x8_t _s00 = vmull_s8(_r00, _k00); int16x8_t _s01 = vmull_s8(_r01, _k01); int16x8_t _s02 = vmull_s8(_r02, _k02); int16x8_t _s03 = vmull_s8(_r10, _k10); int16x8_t _s10 = vmull_s8(_r02, _k00); int16x8_t _s11 = vmull_s8(_r03, _k01); int16x8_t _s12 = vmull_s8(_r04, _k02); int16x8_t _s13 = vmull_s8(_r12, _k10); _s00 = vmlal_s8(_s00, _r11, _k11); _s01 = vmlal_s8(_s01, _r12, _k12); _s02 = vmlal_s8(_s02, _r20, _k20); _s03 = vmlal_s8(_s03, _r21, _k21); _s10 = vmlal_s8(_s10, _r13, _k11); _s11 = vmlal_s8(_s11, _r14, _k12); _s12 = vmlal_s8(_s12, _r22, _k20); _s13 = vmlal_s8(_s13, _r23, _k21); int16x8_t _s08 = vmull_s8(_r22, _k22); int16x8_t _s18 = vmull_s8(_r24, _k22); int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01)); int32x4_t _sum01 = 
vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));
                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));

                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 8;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
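// A plain scalar reference for the pack8 int8 kernels above (an illustrative
// sketch, not part of ncnn): with elempack=8 each group stores 8 channels
// interleaved, so input pixel (y,x) occupies bytes img0.row(y)[x*8 .. x*8+7],
// kernel tap (ky,kx) occupies k0[(ky*3+kx)*8 .. +7], and each of the 8 lanes
// accumulates independently into a 32-bit output.
#if 0
static void convdw3x3s1_pack8_int8_scalar(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        const Mat img0 = bottom_blob.channel(g);

        for (int i = 0; i < outh; i++)
        {
            int* outptr = out.row<int>(i);

            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < 8; p++)
                {
                    int sum = 0;
                    for (int ky = 0; ky < 3; ky++)
                    {
                        // stride 1: output (i, j) reads input rows i..i+2, pixels j..j+2
                        const signed char* r = img0.row<const signed char>(i + ky) + j * 8;
                        for (int kx = 0; kx < 3; kx++)
                        {
                            sum += r[kx * 8 + p] * k0[(ky * 3 + kx) * 8 + p];
                        }
                    }
                    outptr[j * 8 + p] = sum;
                }
            }
        }
    }
}
#endif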
convolutiondepthwise_5x5.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 25; float* outptr = out; float* outptr2 = outptr + outw; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* r5 = img0 + w * 5; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; for (; i + 1 < outh; i += 2) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18 "mov v8.16b, %25.16b \n" // v8 = _bias0 "mov v9.16b, %25.16b \n" // v9 = _bias0 "0: \n" "mov v10.16b, %25.16b \n" // v10 = _bias0 "mov v11.16b, %25.16b \n" // v11 = _bias0 "fmla v8.4s, v16.4s, %19.s[1] \n" "fmla v10.4s, v16.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r11 "fmla v9.4s, v17.4s, %19.s[1] \n" "fmla v11.4s, v17.4s, %18.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r15 "fmla v8.4s, v17.4s, %20.s[1] \n" "fmla v10.4s, v17.4s, %19.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r12 "fmla v9.4s, v18.4s, %20.s[1] \n" "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %19.s[2] \n" "fmla v10.4s, v19.4s, %18.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %19.s[2] \n" "fmla v11.4s, v20.4s, %18.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r17 "fmla v8.4s, v21.4s, %19.s[3] \n" "fmla v10.4s, v21.4s, %18.s[2] \n" "add %4, %4, #32 \n" "fmla v9.4s, v22.4s, %19.s[3] \n" "fmla v11.4s, v22.4s, %18.s[2] \n" // r2 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 
= r20 r24 r28 "fmla v8.4s, v19.4s, %20.s[0] \n" "fmla v10.4s, v19.4s, %18.s[3] \n" "fmla v9.4s, v20.4s, %20.s[0] \n" "fmla v11.4s, v20.4s, %18.s[3] \n" "add %5, %5, #32 \n" "fmla v8.4s, v12.4s, %20.s[2] \n" "fmla v10.4s, v12.4s, %19.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r21 "fmla v9.4s, v13.4s, %20.s[2] \n" "fmla v11.4s, v13.4s, %19.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n" // r25 "fmla v8.4s, v13.4s, %21.s[2] \n" "fmla v10.4s, v13.4s, %20.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n" // r22 "fmla v9.4s, v14.4s, %21.s[2] \n" "fmla v11.4s, v14.4s, %20.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n" // r26 "fmla v8.4s, v21.4s, %20.s[3] \n" "fmla v10.4s, v21.4s, %19.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n" // r23 "fmla v9.4s, v22.4s, %20.s[3] \n" "fmla v11.4s, v22.4s, %19.s[2] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n" // r27 "fmla v8.4s, v19.4s, %21.s[0] \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "fmla v9.4s, v20.4s, %21.s[0] \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r3 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r30 r34 r38 "fmla v8.4s, v21.4s, %21.s[1] \n" "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v9.4s, v22.4s, %21.s[1] \n" "fmla v11.4s, v22.4s, %20.s[0] \n" "add %6, %6, #32 \n" "fmla v8.4s, v16.4s, %21.s[3] \n" "fmla v10.4s, v16.4s, %20.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r31 "fmla v9.4s, v17.4s, %21.s[3] \n" "fmla v11.4s, v17.4s, %20.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r35 "fmla v8.4s, v17.4s, %22.s[3] \n" "fmla v10.4s, v17.4s, %21.s[2] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r32 "fmla v9.4s, v18.4s, %22.s[3] \n" "fmla v11.4s, v18.4s, %21.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r36 "fmla v8.4s, v19.4s, %22.s[0] \n" "fmla v10.4s, v19.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r33 "fmla v9.4s, v20.4s, %22.s[0] \n" "fmla v11.4s, v20.4s, %20.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r37 "fmla v8.4s, v21.4s, %22.s[1] \n" "fmla v10.4s, v21.4s, %21.s[0] \n" "fmla v9.4s, v22.4s, %22.s[1] \n" "fmla v11.4s, v22.4s, %21.s[0] \n" // r4 "prfm pldl1keep, [%7, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%7] \n" // v12 v13 v14 = r40 r44 r48 "fmla v8.4s, v19.4s, %22.s[2] \n" "fmla v10.4s, v19.4s, %21.s[1] \n" "add %7, %7, #32 \n" "fmla v9.4s, v20.4s, %22.s[2] \n" "fmla v11.4s, v20.4s, %21.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r41 "fmla v8.4s, v12.4s, %23.s[0] \n" "fmla v10.4s, v12.4s, %21.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #4 \n" // r45 "fmla v9.4s, v13.4s, %23.s[0] \n" "fmla v11.4s, v13.4s, %21.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #8 \n" // r42 "fmla v8.4s, v13.4s, %24.s[0] \n" "fmla v10.4s, v13.4s, %22.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #8 \n" // r46 "fmla v9.4s, v14.4s, %24.s[0] \n" "fmla v11.4s, v14.4s, %22.s[3] \n" // r0 and r5 "prfm pldl1keep, [%3, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%3] \n" // v16 v17 v18 = r00 r04 r08 "fmla v8.4s, v21.4s, %23.s[1] \n" "fmla v10.4s, v21.4s, %22.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #12 \n" // r43 "fmla v9.4s, v22.4s, %23.s[1] \n" "fmla v11.4s, v22.4s, %22.s[0] \n" "ext v22.16b, v13.16b, v14.16b, #12 \n" // r47 "fmla v8.4s, v19.4s, %23.s[2] \n" "fmla v10.4s, v19.4s, %22.s[1] \n" "prfm pldl1keep, [%8, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%8] \n" // v12 v13 v14 = r50 r54 r58 "fmla v9.4s, v20.4s, %23.s[2] \n" "fmla v11.4s, v20.4s, %22.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v21.4s, %23.s[3] \n" "fmla v10.4s, v21.4s, %22.s[2] \n" "ext v23.16b, v12.16b, 
v13.16b, #4 \n" // r51 "fmla v9.4s, v22.4s, %23.s[3] \n" "fmla v11.4s, v22.4s, %22.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05 "fmla v8.4s, v16.4s, %18.s[0] \n" "fmla v10.4s, v12.4s, %23.s[0] \n" "ext v24.16b, v13.16b, v14.16b, #4 \n" // r55 "fmla v9.4s, v17.4s, %18.s[0] \n" "fmla v11.4s, v13.4s, %23.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v8.4s, v17.4s, %19.s[0] \n" "fmla v10.4s, v13.4s, %24.s[0] \n" "ext v25.16b, v12.16b, v13.16b, #8 \n" // r52 "fmla v9.4s, v18.4s, %19.s[0] \n" "fmla v11.4s, v14.4s, %24.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06 "fmla v8.4s, v19.4s, %18.s[1] \n" "fmla v10.4s, v23.4s, %23.s[1] \n" "ext v26.16b, v13.16b, v14.16b, #8 \n" // r56 "fmla v9.4s, v20.4s, %18.s[1] \n" "fmla v11.4s, v24.4s, %23.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v21.4s, %18.s[2] \n" "fmla v10.4s, v25.4s, %23.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53 "fmla v9.4s, v22.4s, %18.s[2] \n" "fmla v11.4s, v26.4s, %23.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07 "fmla v8.4s, v19.4s, %18.s[3] \n" "fmla v10.4s, v23.4s, %23.s[3] \n" "ext v24.16b, v13.16b, v14.16b, #12 \n" // r57 "fmla v9.4s, v20.4s, %18.s[3] \n" "add %3, %3, #32 \n" "fmla v11.4s, v24.4s, %23.s[3] \n" "add %8, %8, #32 \n" // r1 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r10 r14 r18 "subs %w0, %w0, #1 \n" "st1 {v8.4s, v9.4s}, [%1], #32 \n" "mov v8.16b, %25.16b \n" // v8 = _bias0 "mov v9.16b, %25.16b \n" // v9 = _bias0 "st1 {v10.4s, v11.4s}, [%2], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26"); } if (remain >= 4) { remain -= 4; asm volatile( // r1 "prfm pldl1keep, [%3, #256] \n" "ld1 {v12.4s, v13.4s}, [%3] \n" // v12 v13 = r10 r14 "mov v8.16b, %23.16b \n" // v8 = _bias0 "mov v9.16b, %23.16b \n" // v9 = _bias0 "fmul v10.4s, v12.4s, %17.s[1] \n" "fmul v11.4s, v12.4s, %16.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v8.4s, v13.4s, %18.s[1] \n" "fmla v9.4s, v13.4s, %17.s[0] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v10.4s, v21.4s, %17.s[2] \n" "fmla v11.4s, v21.4s, %16.s[1] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v8.4s, v22.4s, %17.s[3] \n" "fmla v9.4s, v22.4s, %16.s[2] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld1 {v16.4s, v17.4s}, [%4] \n" // v16 v17 = r20 r24 "fmla v10.4s, v23.4s, %18.s[0] \n" "fmla v11.4s, v23.4s, %16.s[3] \n" "add %4, %4, #16 \n" "fmla v8.4s, v16.4s, %18.s[2] \n" "fmla v9.4s, v16.4s, %17.s[1] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v10.4s, v17.4s, %19.s[2] \n" "fmla v11.4s, v17.4s, %18.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v8.4s, v18.4s, %18.s[3] \n" "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v10.4s, v19.4s, %19.s[0] \n" "fmla v11.4s, v19.4s, %17.s[3] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld1 {v12.4s, v13.4s}, [%5] \n" // v12 v13 = r30 r34 "fmla v8.4s, v20.4s, %19.s[1] \n" "fmla v9.4s, v20.4s, %18.s[0] \n" "add %5, %5, #16 
\n" "fmla v10.4s, v12.4s, %19.s[3] \n" "fmla v11.4s, v12.4s, %18.s[2] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r31 "fmla v8.4s, v13.4s, %20.s[3] \n" "fmla v9.4s, v13.4s, %19.s[2] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r32 "fmla v10.4s, v21.4s, %20.s[0] \n" "fmla v11.4s, v21.4s, %18.s[3] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r33 "fmla v8.4s, v22.4s, %20.s[1] \n" "fmla v9.4s, v22.4s, %19.s[0] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld1 {v16.4s, v17.4s}, [%6] \n" // v16 v17 = r40 r44 "fmla v10.4s, v23.4s, %20.s[2] \n" "fmla v11.4s, v23.4s, %19.s[1] \n" "add %6, %6, #16 \n" "fmla v8.4s, v16.4s, %21.s[0] \n" "fmla v9.4s, v16.4s, %19.s[3] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v10.4s, v17.4s, %22.s[0] \n" "fmla v11.4s, v17.4s, %20.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v8.4s, v18.4s, %21.s[1] \n" "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v10.4s, v19.4s, %21.s[2] \n" "fmla v11.4s, v19.4s, %20.s[1] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld1 {v16.4s, v17.4s}, [%2] \n" // v16 v17 = r00 r04 "fmla v8.4s, v20.4s, %21.s[3] \n" "fmla v9.4s, v20.4s, %20.s[2] \n" // r5 "prfm pldl1keep, [%7, #256] \n" "ld1 {v12.4s, v13.4s}, [%7] \n" // v12 v13 = r50 r54 "fmla v10.4s, v16.4s, %16.s[0] \n" "fmla v11.4s, v12.4s, %21.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v17.4s, %17.s[0] \n" "ext v21.16b, v12.16b, v13.16b, #4 \n" // r51 "fmla v9.4s, v13.4s, %22.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v10.4s, v18.4s, %16.s[1] \n" "ext v22.16b, v12.16b, v13.16b, #8 \n" // r52 "fmla v11.4s, v21.4s, %21.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v19.4s, %16.s[2] \n" "ext v23.16b, v12.16b, v13.16b, #12 \n" // r53 "fmla v9.4s, v22.4s, %21.s[2] \n" "add %3, %3, #16 \n" "fmla v10.4s, v20.4s, %16.s[3] \n" "fmla v11.4s, v23.4s, %21.s[3] \n" "add %2, %2, #16 \n" "fadd v8.4s, v8.4s, v10.4s \n" "fadd v9.4s, v9.4s, v11.4s \n" "add %7, %7, #16 \n" "st1 {v8.4s}, [%0], #16 \n" "st1 {v9.4s}, [%1], #16 \n" : "=r"(outptr), // %0 "=r"(outptr2), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4), // %6 "=r"(r5) // %7 : "0"(outptr), "1"(outptr2), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "7"(r5), "w"(_k0123), // %16 "w"(_k4567), // %17 "w"(_k891011), // %18 "w"(_k12131415), // %19 "w"(_k16171819), // %20 "w"(_k20212223), // %21 "w"(_k24242424), // %22 "w"(_bias0) // %23 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } #else if (nn > 0) { asm volatile( // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14 "vmov q8, %q25 \n" // q8 = _bias0 "0: \n" "vmov q9, %q25 \n" // q9 = _bias0 "vmla.f32 q8, q14, %e19[1] \n" "vmla.f32 q9, q14, %e18[0] \n" "vext.32 q12, q14, q15, #1 \n" // r11 "vmla.f32 q8, q15, %e20[1] \n" "vmla.f32 q9, q15, %e19[0] \n" "vext.32 q13, q14, q15, #2 \n" // r12 "vmla.f32 q8, q12, %f19[0] \n" "vmla.f32 q9, q12, %e18[1] \n" "vext.32 q12, q14, q15, #3 \n" // r13 "vmla.f32 q8, q13, %f19[1] \n" "vmla.f32 q9, q13, %f18[0] \n" // r2 "pld [%5, #256] \n" "vld1.f32 {d20-d23}, [%5] \n" // q10 q11 = r20 r24 "vmla.f32 q8, q12, %e20[0] \n" "vmla.f32 q9, q12, %f18[1] \n" "add %5, #16 \n" "vmla.f32 q8, q10, %f20[0] \n" "vmla.f32 q9, q10, %e19[1] \n" "vext.32 q12, q10, q11, #1 \n" // r21 "vmla.f32 q8, q11, %f21[0] \n" "vmla.f32 q9, q11, %e20[1] \n" "vext.32 q13, q10, q11, #2 \n" // r22 "vmla.f32 q8, q12, %f20[1] \n" "vmla.f32 q9, q12, %f19[0] \n" 
"vext.32 q12, q10, q11, #3 \n" // r23 "vmla.f32 q8, q13, %e21[0] \n" "vmla.f32 q9, q13, %f19[1] \n" // r3 "pld [%6, #256] \n" "vld1.f32 {d28-d31}, [%6] \n" // q14 q15 = r30 r34 "vmla.f32 q8, q12, %e21[1] \n" "vmla.f32 q9, q12, %e20[0] \n" "add %6, #16 \n" "vmla.f32 q8, q14, %f21[1] \n" "vmla.f32 q9, q14, %f20[0] \n" "vext.32 q12, q14, q15, #1 \n" // r31 "vmla.f32 q8, q15, %f22[1] \n" "vmla.f32 q9, q15, %f21[0] \n" "vext.32 q13, q14, q15, #2 \n" // r32 "vmla.f32 q8, q12, %e22[0] \n" "vmla.f32 q9, q12, %f20[1] \n" "vext.32 q12, q14, q15, #3 \n" // r33 "vmla.f32 q8, q13, %e22[1] \n" "vmla.f32 q9, q13, %e21[0] \n" // r4 "pld [%7, #256] \n" "vld1.f32 {d20-d23}, [%7] \n" // q10 q11 = r40 r44 "vmla.f32 q8, q12, %f22[0] \n" "vmla.f32 q9, q12, %e21[1] \n" "add %7, #16 \n" "vmla.f32 q8, q10, %e23[0] \n" "vmla.f32 q9, q10, %f21[1] \n" "vext.32 q12, q10, q11, #1 \n" // r41 "vmla.f32 q8, q11, %e24[0] \n" "vmla.f32 q9, q11, %f22[1] \n" "vext.32 q13, q10, q11, #2 \n" // r42 "vmla.f32 q8, q12, %e23[1] \n" "vmla.f32 q9, q12, %e22[0] \n" "vext.32 q12, q10, q11, #3 \n" // r43 "vmla.f32 q8, q13, %f23[0] \n" "vmla.f32 q9, q13, %e22[1] \n" // r0 and r5 "pld [%3, #256] \n" "vld1.f32 {d20-d23}, [%3] \n" // q10 q11 = r00 r04 "vmla.f32 q8, q12, %f23[1] \n" "vmla.f32 q9, q12, %f22[0] \n" // r5 "pld [%8, #256] \n" "vld1.f32 {d28-d31}, [%8] \n" // q14 q15 = r50 r54 "vmla.f32 q8, q10, %e18[0] \n" "vmla.f32 q9, q14, %e23[0] \n" "vext.32 q12, q10, q11, #1 \n" // r01 "vmla.f32 q8, q11, %e19[0] \n" "vmla.f32 q9, q15, %e24[0] \n" "vext.32 q13, q14, q15, #1 \n" // r51 "vmla.f32 q8, q12, %e18[1] \n" "vext.32 q12, q10, q11, #2 \n" // r02 "vmla.f32 q9, q13, %e23[1] \n" "vext.32 q13, q14, q15, #2 \n" // r52 "vmla.f32 q8, q12, %f18[0] \n" "vext.32 q12, q10, q11, #3 \n" // r03 "vmla.f32 q9, q13, %f23[0] \n" "vext.32 q13, q14, q15, #3 \n" // r33 "vmla.f32 q8, q12, %f18[1] \n" "add %3, #16 \n" "vmla.f32 q9, q13, %f23[1] \n" "add %4, #16 \n" // r1 "pld [%4, #256] \n" "vld1.f32 {d28-d31}, [%4] \n" // q14 q15 = r10 r14 "add %8, #16 \n" "vst1.f32 {d16-d17}, [%1]! \n" "vmov q8, %q25 \n" // q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%2]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(outptr2), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2), // %5 "=r"(r3), // %6 "=r"(r4), // %7 "=r"(r5) // %8 : "0"(nn), "1"(outptr), "2"(outptr2), "3"(r0), "4"(r1), "5"(r2), "6"(r3), "7"(r4), "8"(r5), "w"(_k0123), // %18 "w"(_k4567), // %19 "w"(_k891011), // %20 "w"(_k12131415), // %21 "w"(_k16171819), // %22 "w"(_k20212223), // %23 "w"(_k24242424), // %24 "w"(_bias0) // %25 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = bias0; float sum2 = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r1 = vld1q_f32(r1); float32x4_t _k1 = vld1q_f32(k1); float32x4_t _sum = vmulq_f32(_r1, _k1); float32x4_t _sum2 = vmulq_f32(_r1, _k0123); float32x4_t _r2 = vld1q_f32(r2); float32x4_t _k2 = vld1q_f32(k2); _sum = vmlaq_f32(_sum, _r2, _k2); _sum2 = vmlaq_f32(_sum2, _r2, _k1); float32x4_t _r3 = vld1q_f32(r3); float32x4_t _k3 = vld1q_f32(k3); _sum = vmlaq_f32(_sum, _r3, _k3); _sum2 = vmlaq_f32(_sum2, _r3, _k2); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); _sum2 = vmlaq_f32(_sum2, _r4, _k3); float32x4_t _r0 = vld1q_f32(r0); _sum = vmlaq_f32(_sum, _r0, _k0123); float32x4_t _r5 = vld1q_f32(r5); _sum2 = vmlaq_f32(_sum2, _r5, _k20212223); float32x4_t _k_t4 = {}; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4 = {}; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; _r_t4 = vextq_f32(_r_t4, _r_t4, 1); _r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3); _sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4); sum2 += r5[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2)); float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2); sum += vget_lane_f32(_ss_ss2, 0); sum2 += vget_lane_f32(_ss_ss2, 1); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r1[3] * k0[3]; sum2 += r1[4] * k0[4]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r2[3] * k1[3]; sum2 += r2[4] * k1[4]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; sum2 += r3[3] * k2[3]; sum2 += r3[4] * k2[4]; sum2 += r4[0] * k3[0]; sum2 += r4[1] * k3[1]; sum2 += r4[2] * k3[2]; sum2 += r4[3] * k3[3]; sum2 += r4[4] * k3[4]; sum2 += r5[0] * k4[0]; sum2 += r5[1] * k4[1]; sum2 += r5[2] * k4[2]; sum2 += r5[3] * k4[3]; sum2 += r5[4] * k4[4]; #endif // __ARM_NEON *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; r4++; r5++; outptr++; outptr2++; } r0 += 4 + w; r1 += 4 + w; r2 += 4 + w; r3 += 4 + w; r4 += 4 + w; r5 += 4 + w; outptr += outw; outptr2 += outw; } for (; i < outh; 
i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // v10 v11 // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08 "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r01 "fmul v11.4s, v17.4s, %14.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r05 "fmla v8.4s, v17.4s, %15.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v9.4s, v18.4s, %15.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r06 "fmla v10.4s, v19.4s, %14.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v11.4s, v20.4s, %14.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r07 "fmla v8.4s, v21.4s, %14.s[2] \n" "fmla v9.4s, v22.4s, %14.s[2] \n" // r1 "prfm pldl1keep, [%3, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%3] \n" // v12 v13 v14 = r10 r14 r18 "fmla v10.4s, v19.4s, %14.s[3] \n" "fmla v11.4s, v20.4s, %14.s[3] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v9.4s, v13.4s, %15.s[1] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n" // r15 "fmla v10.4s, v13.4s, %16.s[1] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v11.4s, v14.4s, %16.s[1] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %15.s[2] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %15.s[2] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n" // r17 "fmla v10.4s, v21.4s, %15.s[3] \n" "fmla v11.4s, v22.4s, %15.s[3] \n" // r2 "prfm pldl1keep, [%4, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%4] \n" // v16 v17 v18 = r20 r24 r28 "fmla v8.4s, v19.4s, %16.s[0] \n" "fmla v9.4s, v20.4s, %16.s[0] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v11.4s, v17.4s, %16.s[2] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r25 "fmla v8.4s, v17.4s, %17.s[2] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v9.4s, v18.4s, %17.s[2] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r26 "fmla v10.4s, v19.4s, %16.s[3] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v11.4s, v20.4s, %16.s[3] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r27 "fmla v8.4s, v21.4s, %17.s[0] \n" "fmla v9.4s, v22.4s, %17.s[0] \n" // r3 "prfm pldl1keep, [%5, #384] \n" "ld1 {v12.4s, v13.4s, v14.4s}, [%5] \n" // v12 v13 v14 = r30 r34 r38 "fmla v10.4s, v19.4s, %17.s[1] \n" "fmla v11.4s, v20.4s, %17.s[1] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "ext v19.16b, v12.16b, v13.16b, #4 \n" // r11 "fmla v9.4s, v13.4s, %17.s[3] \n" "ext v20.16b, v13.16b, v14.16b, #4 \n" // r15 "fmla v10.4s, v13.4s, %18.s[3] \n" "ext v21.16b, v12.16b, v13.16b, #8 \n" // r12 "fmla v11.4s, v14.4s, %18.s[3] \n" "ext v22.16b, v13.16b, v14.16b, #8 \n" // r16 "fmla v8.4s, v19.4s, %18.s[0] \n" "ext v19.16b, v12.16b, v13.16b, #12 \n" // r13 "fmla v9.4s, v20.4s, %18.s[0] \n" "ext v20.16b, v13.16b, v14.16b, #12 \n" // r17 "fmla v10.4s, v21.4s, %18.s[1] \n" "fmla v11.4s, v22.4s, %18.s[1] \n" // r4 "prfm pldl1keep, [%6, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%6] \n" // v16 v17 v18 = r40 r44 r48 "fmla v8.4s, v19.4s, %18.s[2] \n" "fmla v9.4s, v20.4s, %18.s[2] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v11.4s, v17.4s, %19.s[0] \n" "ext v20.16b, v17.16b, v18.16b, #4 \n" // r45 "fmla 
v8.4s, v17.4s, %20.s[0] \n" "ext v21.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v9.4s, v18.4s, %20.s[0] \n" "ext v22.16b, v17.16b, v18.16b, #8 \n" // r46 "fmla v10.4s, v19.4s, %19.s[1] \n" "ext v19.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v11.4s, v20.4s, %19.s[1] \n" "ext v20.16b, v17.16b, v18.16b, #12 \n" // r47 "fmla v8.4s, v21.4s, %19.s[2] \n" "add %2, %2, #32 \n" "fmla v9.4s, v22.4s, %19.s[2] \n" "add %3, %3, #32 \n" "fmla v10.4s, v19.4s, %19.s[3] \n" "add %4, %4, #32 \n" "fmla v11.4s, v20.4s, %19.s[3] \n" // r0 "prfm pldl1keep, [%2, #384] \n" "ld1 {v16.4s, v17.4s, v18.4s}, [%2] \n" // v16 v17 v18 = r00 r04 r08 "add %5, %5, #32 \n" "fadd v10.4s, v8.4s, v10.4s \n" "add %6, %6, #32 \n" "fadd v11.4s, v9.4s, v11.4s \n" "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "subs %w0, %w0, #1 \n" "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22"); } if (remain >= 4) { remain -= 4; asm volatile( // r0 "prfm pldl1keep, [%1, #256] \n" "ld1 {v16.4s, v17.4s}, [%1] \n" // v16 v17 = r00 r04 "mov v8.16b, %19.16b \n" // v8 = _bias0 "add %1, %1, #16 \n" "fmul v9.4s, v16.4s, %12.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r01 "fmla v8.4s, v17.4s, %13.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r02 "fmla v9.4s, v18.4s, %12.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r03 "fmla v8.4s, v19.4s, %12.s[2] \n" // r1 "prfm pldl1keep, [%2, #256] \n" "ld1 {v10.4s, v11.4s}, [%2] \n" // v10 v11 = r10 r14 "fmla v9.4s, v20.4s, %12.s[3] \n" "add %2, %2, #16 \n" "fmla v8.4s, v10.4s, %13.s[1] \n" "ext v12.16b, v10.16b, v11.16b, #4 \n" // r11 "fmla v9.4s, v11.4s, %14.s[1] \n" "ext v13.16b, v10.16b, v11.16b, #8 \n" // r12 "fmla v8.4s, v12.4s, %13.s[2] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n" // r13 "fmla v9.4s, v13.4s, %13.s[3] \n" // r2 "prfm pldl1keep, [%3, #256] \n" "ld1 {v16.4s, v17.4s}, [%3] \n" // v16 v17 = r20 r24 "fmla v8.4s, v14.4s, %14.s[0] \n" "add %3, %3, #16 \n" "fmla v9.4s, v16.4s, %14.s[2] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r21 "fmla v8.4s, v17.4s, %15.s[2] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r22 "fmla v9.4s, v18.4s, %14.s[3] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r23 "fmla v8.4s, v19.4s, %15.s[0] \n" // r3 "prfm pldl1keep, [%4, #256] \n" "ld1 {v10.4s, v11.4s}, [%4] \n" // v10 v11 = r30 r34 "fmla v9.4s, v20.4s, %15.s[1] \n" "add %4, %4, #16 \n" "fmla v8.4s, v10.4s, %15.s[3] \n" "ext v12.16b, v10.16b, v11.16b, #4 \n" // r31 "fmla v9.4s, v11.4s, %16.s[3] \n" "ext v13.16b, v10.16b, v11.16b, #8 \n" // r32 "fmla v8.4s, v12.4s, %16.s[0] \n" "ext v14.16b, v10.16b, v11.16b, #12 \n" // r33 "fmla v9.4s, v13.4s, %16.s[1] \n" // r4 "prfm pldl1keep, [%5, #256] \n" "ld1 {v16.4s, v17.4s}, [%5] \n" // v16 v17 = r40 r44 "fmla v8.4s, v14.4s, %16.s[2] \n" "add %5, %5, #16 \n" "fmla v9.4s, v16.4s, %17.s[0] \n" "ext v18.16b, v16.16b, v17.16b, #4 \n" // r41 "fmla v8.4s, v17.4s, %18.s[0] \n" "ext v19.16b, v16.16b, v17.16b, #8 \n" // r42 "fmla v9.4s, v18.4s, %17.s[1] \n" "ext v20.16b, v16.16b, v17.16b, #12 \n" // r43 "fmla v8.4s, v19.4s, %17.s[2] \n" "fmla v9.4s, v20.4s, %17.s[3] \n" 
"fadd v8.4s, v8.4s, v9.4s \n" "st1 {v8.4s}, [%0], #16 \n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20"); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04 "vmov q8, %q21 \n" // q8 = _bias0 "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vext.32 q12, q10, q11, #1 \n" // r01 "vmla.f32 q8, q11, %e15[0] \n" "vext.32 q13, q10, q11, #2 \n" // r02 "vmla.f32 q9, q12, %e14[1] \n" "vext.32 q12, q10, q11, #3 \n" // r03 "vmla.f32 q8, q13, %f14[0] \n" // r1 "pld [%3, #256] \n" "vld1.f32 {d28-d31}, [%3] \n" // q14 q15 = r10 r14 "vmla.f32 q9, q12, %f14[1] \n" "add %3, #16 \n" "vmla.f32 q8, q14, %e15[1] \n" "vext.32 q12, q14, q15, #1 \n" // r11 "vmla.f32 q9, q15, %e16[1] \n" "vext.32 q13, q14, q15, #2 \n" // r12 "vmla.f32 q8, q12, %f15[0] \n" "vext.32 q12, q14, q15, #3 \n" // r13 "vmla.f32 q9, q13, %f15[1] \n" // r2 "pld [%4, #256] \n" "vld1.f32 {d20-d23}, [%4] \n" // q10 q11 = r20 r24 "vmla.f32 q8, q12, %e16[0] \n" "add %4, #16 \n" "vmla.f32 q9, q10, %f16[0] \n" "vext.32 q12, q10, q11, #1 \n" // r21 "vmla.f32 q8, q11, %f17[0] \n" "vext.32 q13, q10, q11, #2 \n" // r22 "vmla.f32 q9, q12, %f16[1] \n" "vext.32 q12, q10, q11, #3 \n" // r23 "vmla.f32 q8, q13, %e17[0] \n" // r3 "pld [%5, #256] \n" "vld1.f32 {d28-d31}, [%5] \n" // q14 q15 = r30 r34 "vmla.f32 q9, q12, %e17[1] \n" "add %5, #16 \n" "vmla.f32 q8, q14, %f17[1] \n" "vext.32 q12, q14, q15, #1 \n" // r31 "vmla.f32 q9, q15, %f18[1] \n" "vext.32 q13, q14, q15, #2 \n" // r32 "vmla.f32 q8, q12, %e18[0] \n" "vext.32 q12, q14, q15, #3 \n" // r33 "vmla.f32 q9, q13, %e18[1] \n" // r4 "pld [%6, #256] \n" "vld1.f32 {d20-d23}, [%6] \n" // q10 q11 = r40 r44 "vmla.f32 q8, q12, %f18[0] \n" "add %6, #16 \n" "vmla.f32 q9, q10, %e19[0] \n" "vext.32 q12, q10, q11, #1 \n" // r41 "vmla.f32 q8, q11, %e20[0] \n" "vext.32 q13, q10, q11, #2 \n" // r42 "vmla.f32 q9, q12, %e19[1] \n" "vext.32 q12, q10, q11, #3 \n" // r43 "vmla.f32 q8, q13, %f19[0] \n" "add %2, #16 \n" "vmla.f32 q9, q12, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld1.f32 {d20-d23}, [%2] \n" // q10 q11 = r00 r04 "vadd.f32 q9, q9, q8 \n" "vmov q8, %q21 \n" // q8 = _bias0 "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! 
\n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { #if __ARM_NEON #if __aarch64__ // TODO neon assembly optimize float sum = bias0; float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); float32x4_t _k_t4 = {}; _k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0); _k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1); _k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2); _k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3); float32x4_t _r_t4 = {}; _r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0); _r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1); _r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2); _r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3); _sum = vmlaq_f32(_sum, _r_t4, _k_t4); sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #else // TODO neon assembly optimize asm volatile( "veor q14, q14 \n" "vext.32 q14, %q19, q14, #3 \n" // q14 = bias0 0 0 0 "vld1.f32 {d16-d17}, [%1] \n" // q8 = r00 r01 r02 r03 "vld1.f32 {d18-d19}, [%2] \n" // q9 = r10 r11 r12 r13(X) "add r4, %1, #16 \n" "vld1.f32 {d19[1]}, [r4] \n" "vext.32 q9, q9, q9, #3 \n" // q9 = r04 r10 r11 r12 "vmla.f32 q14, q8, %q12 \n" "add r4, %2, #12 \n" "vld1.f32 {d20}, [r4] \n" // d20 = r13 r14 "vld1.f32 {d21}, [%3] \n" // d21 = r20 r21 "vmla.f32 q14, q9, %q13 \n" "add r4, %3, #8 \n" "vld1.f32 {d22-d23}, [r4] \n" // q11 = r22 r23 r24 X "vld1.f32 {d23[1]}, [%4] \n" // q11 = r22 r23 r24 r30 "vmla.f32 q14, q10, %q14 \n" "add r4, %4, #4 \n" "vld1.f32 {d24-d25}, [r4] \n" // q12 = r31 r32 r33 r34 "vmla.f32 q14, q11, %q15 \n" "vld1.f32 {d26-d27}, [%5] \n" // q13 = r40 r41 r42 r43 "vmla.f32 q14, q12, %q16 \n" "veor d30, d30 \n" "add r4, %5, #16 \n" "vld1.f32 {d30[0]}, [r4] \n" // d30 = r44 0 "vmla.f32 q14, q13, %q17 \n" "vmla.f32 d28, d30, %e18 \n" "add %1, #4 \n" // h-sum "vadd.f32 d28, d28, d29 \n" "add %2, #4 \n" "add %3, #4 \n" "vpadd.f32 d28, d28, d28 \n" "add %4, #4 \n" "add %5, #4 \n" "vst1.f32 {d28[0]}, [%0]! 
\n" : "=r"(outptr), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4) // %5 : "0"(outptr), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "w"(_k0123), // %12 "w"(_k4567), // %13 "w"(_k891011), // %14 "w"(_k12131415), // %15 "w"(_k16171819), // %16 "w"(_k20212223), // %17 "w"(_k24242424), // %18 "w"(_bias0) // %19 : "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif // __aarch64__ #else float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; *outptr = sum; r0++; r1++; r2++; r3++; r4++; outptr++; #endif } r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; } } } static void convdw5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; //int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; //int outch = top_blob.c; const int tailstep = w - 2 * outw + w; const int group = bottom_blob.c; const float* kernel = _kernel; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); const float bias0 = bias ? bias[g] : 0.f; const float* kernel0 = kernel + g * 25; float* outptr = out; const float* img0 = bottom_blob.channel(g); const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* r4 = img0 + w * 4; const float* k0 = kernel0; const float* k1 = kernel0 + 5; const float* k2 = kernel0 + 10; const float* k3 = kernel0 + 15; const float* k4 = kernel0 + 20; #if __ARM_NEON float32x4_t _k0123 = vld1q_f32(kernel0); float32x4_t _k4567 = vld1q_f32(kernel0 + 4); float32x4_t _k891011 = vld1q_f32(kernel0 + 8); float32x4_t _k12131415 = vld1q_f32(kernel0 + 12); float32x4_t _k16171819 = vld1q_f32(kernel0 + 16); float32x4_t _k20212223 = vld1q_f32(kernel0 + 20); float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]); float32x4_t _bias0 = vdupq_n_f32(bias0); #endif // __ARM_NEON int i = 0; // NOTE unroll outh 2 results somewhat speed drop :| (about -4%) // so we do not implement it here for (; i < outh; i++) { #if __ARM_NEON #if __aarch64__ int nn = outw >> 3; int remain = outw & 7; #else int nn = outw >> 2; int remain = outw & 3; #endif // __aarch64__ #else int remain = outw; #endif // __ARM_NEON #if __ARM_NEON #if __aarch64__ if (nn > 0) { asm volatile( // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "0: \n" "fmul v10.4s, v16.4s, %14.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v20.4s, v21.4s}, [%2] \n" // v20 v21 = r016 r017 "fmul v11.4s, v18.4s, %14.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r02 "fmla v8.4s, v17.4s, %14.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r010 "fmla v9.4s, v19.4s, %14.s[1] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r03 "fmla v10.4s, v22.4s, 
%14.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r011 "fmla v11.4s, v25.4s, %14.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r04 "fmla v8.4s, v23.4s, %14.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r012 "fmla v9.4s, v26.4s, %14.s[3] \n" // r1 "prfm pldl1keep, [%3, #256] \n" "ld2 {v12.4s, v13.4s}, [%3], #32 \n" // v12 v13 = r10 r11 "fmla v10.4s, v24.4s, %15.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v14.4s, v15.4s}, [%3], #32 \n" // v14 v15 = r18 r19 "fmla v11.4s, v27.4s, %15.s[0] \n" "fmla v8.4s, v12.4s, %15.s[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld2 {v20.4s, v21.4s}, [%3] \n" // v20 v21 = r116 r117 "fmla v9.4s, v14.4s, %15.s[1] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r12 "fmla v10.4s, v13.4s, %15.s[2] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r110 "fmla v11.4s, v15.4s, %15.s[2] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r13 "fmla v8.4s, v22.4s, %15.s[3] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r111 "fmla v9.4s, v25.4s, %15.s[3] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r14 "fmla v10.4s, v23.4s, %16.s[0] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r112 "fmla v11.4s, v26.4s, %16.s[0] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld2 {v16.4s, v17.4s}, [%4], #32 \n" // v16 v17 = r20 r21 "fmla v8.4s, v24.4s, %16.s[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v18.4s, v19.4s}, [%4], #32 \n" // v18 v19 = r28 r29 "fmla v9.4s, v27.4s, %16.s[1] \n" "fmla v10.4s, v16.4s, %16.s[2] \n" "prfm pldl1keep, [%4, #256] \n" "ld2 {v20.4s, v21.4s}, [%4] \n" // v20 v21 = r216 r217 "fmla v11.4s, v18.4s, %16.s[2] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r22 "fmla v8.4s, v17.4s, %16.s[3] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r210 "fmla v9.4s, v19.4s, %16.s[3] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r23 "fmla v10.4s, v22.4s, %17.s[0] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r211 "fmla v11.4s, v25.4s, %17.s[0] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r24 "fmla v8.4s, v23.4s, %17.s[1] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r212 "fmla v9.4s, v26.4s, %17.s[1] \n" // r3 "prfm pldl1keep, [%5, #256] \n" "ld2 {v12.4s, v13.4s}, [%5], #32 \n" // v12 v13 = r30 r31 "fmla v10.4s, v24.4s, %17.s[2] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v14.4s, v15.4s}, [%5], #32 \n" // v14 v15 = r38 r39 "fmla v11.4s, v27.4s, %17.s[2] \n" "fmla v8.4s, v12.4s, %17.s[3] \n" "prfm pldl1keep, [%5, #256] \n" "ld2 {v20.4s, v21.4s}, [%5] \n" // v20 v21 = r316 r317 "fmla v9.4s, v14.4s, %17.s[3] \n" "ext v22.16b, v12.16b, v14.16b, #4 \n" // v22 = r32 "fmla v10.4s, v13.4s, %18.s[0] \n" "ext v25.16b, v14.16b, v20.16b, #4 \n" // v25 = r310 "fmla v11.4s, v15.4s, %18.s[0] \n" "ext v23.16b, v13.16b, v15.16b, #4 \n" // v23 = r33 "fmla v8.4s, v22.4s, %18.s[1] \n" "ext v26.16b, v15.16b, v21.16b, #4 \n" // v26 = r311 "fmla v9.4s, v25.4s, %18.s[1] \n" "ext v24.16b, v12.16b, v14.16b, #8 \n" // v24 = r34 "fmla v10.4s, v23.4s, %18.s[2] \n" "ext v27.16b, v14.16b, v20.16b, #8 \n" // v27 = r312 "fmla v11.4s, v26.4s, %18.s[2] \n" // r4 "prfm pldl1keep, [%6, #256] \n" "ld2 {v16.4s, v17.4s}, [%6], #32 \n" // v16 v17 = r40 r41 "fmla v8.4s, v24.4s, %18.s[3] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v18.4s, v19.4s}, [%6], #32 \n" // v18 v19 = r48 r49 "fmla v9.4s, v27.4s, %18.s[3] \n" "fmla v10.4s, v16.4s, %19.s[0] \n" "prfm pldl1keep, [%6, #256] \n" "ld2 {v20.4s, v21.4s}, [%6] \n" // v20 v21 = r416 r417 "fmla v11.4s, v18.4s, %19.s[0] \n" "ext v22.16b, v16.16b, v18.16b, #4 \n" // v22 = r42 "fmla v8.4s, 
v17.4s, %19.s[1] \n" "ext v25.16b, v18.16b, v20.16b, #4 \n" // v25 = r410 "fmla v9.4s, v19.4s, %19.s[1] \n" "ext v23.16b, v17.16b, v19.16b, #4 \n" // v23 = r43 "fmla v10.4s, v22.4s, %19.s[2] \n" "ext v26.16b, v19.16b, v21.16b, #4 \n" // v26 = r411 "fmla v11.4s, v25.4s, %19.s[2] \n" "ext v24.16b, v16.16b, v18.16b, #8 \n" // v24 = r44 "fmla v8.4s, v23.4s, %19.s[3] \n" "ext v27.16b, v18.16b, v20.16b, #8 \n" // v27 = r412 "fmla v9.4s, v26.4s, %19.s[3] \n" "fmla v10.4s, v24.4s, %20.s[0] \n" // r0 "prfm pldl1keep, [%2, #256] \n" "ld2 {v16.4s, v17.4s}, [%2], #32 \n" // v16 v17 = r00 r01 "fmla v11.4s, v27.4s, %20.s[0] \n" "prfm pldl1keep, [%2, #256] \n" "ld2 {v18.4s, v19.4s}, [%2], #32 \n" // v18 v19 = r08 r09 "fadd v10.4s, v8.4s, v10.4s \n" "fadd v11.4s, v9.4s, v11.4s \n" "subs %w0, %w0, #1 \n" "mov v8.16b, %21.16b \n" // v8 = _bias0 "mov v9.16b, %21.16b \n" // v9 = _bias0 "st1 {v10.4s, v11.4s}, [%1], #32 \n" "bne 0b \n" "sub %2, %2, #64 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #else if (nn > 0) { asm volatile( // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01 "vmov q8, %q21 \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "0: \n" "vmul.f32 q9, q10, %e14[0] \n" "vmov d26, d25 \n" // q13 = r09 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r02 "vmla.f32 q8, q11, %e14[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r03 "vmla.f32 q9, q14, %f14[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r04 "vmla.f32 q8, q15, %f14[1] \n" // r1 "pld [%3, #256] \n" "vld2.f32 {d20-d23}, [%3]! \n" // q10 q11 = r10 r11 "vmla.f32 q9, q14, %e15[0] \n" "pld [%3, #128] \n" "vld2.f32 {d24-d25}, [%3] \n" // q12 = r18 x x "vmla.f32 q8, q10, %e15[1] \n" "vmov d26, d25 \n" // q13 = r19 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r12 "vmla.f32 q9, q11, %f15[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r13 "vmla.f32 q8, q14, %f15[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r14 "vmla.f32 q9, q15, %e16[0] \n" // r2 "pld [%4, #256] \n" "vld2.f32 {d20-d23}, [%4]! \n" // q10 q11 = r20 r21 "vmla.f32 q8, q14, %e16[1] \n" "pld [%4, #128] \n" "vld2.f32 {d24-d25}, [%4] \n" // q12 = r28 x x "vmla.f32 q9, q10, %f16[0] \n" "vmov d26, d25 \n" // q13 = r29 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r22 "vmla.f32 q8, q11, %f16[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r23 "vmla.f32 q9, q14, %e17[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r24 "vmla.f32 q8, q15, %e17[1] \n" // r3 "pld [%5, #256] \n" "vld2.f32 {d20-d23}, [%5]! \n" // q10 q11 = r30 r31 "vmla.f32 q9, q14, %f17[0] \n" "pld [%5, #128] \n" "vld2.f32 {d24-d25}, [%5] \n" // q12 = r38 x x "vmla.f32 q8, q10, %f17[1] \n" "vmov d26, d25 \n" // q13 = r39 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r32 "vmla.f32 q9, q11, %e18[0] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r33 "vmla.f32 q8, q14, %e18[1] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r34 "vmla.f32 q9, q15, %f18[0] \n" // r4 "pld [%6, #256] \n" "vld2.f32 {d20-d23}, [%6]! 
\n" // q10 q11 = r40 r41 "vmla.f32 q8, q14, %f18[1] \n" "pld [%6, #128] \n" "vld2.f32 {d24-d25}, [%6] \n" // q12 = r48 x x "vmla.f32 q9, q10, %e19[0] \n" "vmov d26, d25 \n" // q13 = r49 x x "vext.32 q14, q10, q12, #1 \n" // q14 = r42 "vmla.f32 q8, q11, %e19[1] \n" "vext.32 q15, q11, q13, #1 \n" // q15 = r43 "vmla.f32 q9, q14, %f19[0] \n" "vext.32 q14, q10, q12, #2 \n" // q14 = r44 "vmla.f32 q8, q15, %f19[1] \n" // r0 "pld [%2, #256] \n" "vld2.f32 {d20-d23}, [%2]! \n" // q10 q11 = r00 r01 "vmla.f32 q9, q14, %e20[0] \n" "pld [%2, #128] \n" "vld2.f32 {d24-d25}, [%2] \n" // q12 = r08 x x "vadd.f32 q9, q8, q9 \n" "vmov q8, %q21 \n" "subs %0, #1 \n" "vst1.f32 {d18-d19}, [%1]! \n" "bne 0b \n" "sub %2, #32 \n" : "=r"(nn), // %0 "=r"(outptr), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2), // %4 "=r"(r3), // %5 "=r"(r4) // %6 : "0"(nn), "1"(outptr), "2"(r0), "3"(r1), "4"(r2), "5"(r3), "6"(r4), "w"(_k0123), // %14 "w"(_k4567), // %15 "w"(_k891011), // %16 "w"(_k12131415), // %17 "w"(_k16171819), // %18 "w"(_k20212223), // %19 "w"(_k24242424), // %20 "w"(_bias0) // %21 : "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); } #endif // __aarch64__ #endif // __ARM_NEON for (; remain > 0; remain--) { float sum = bias0; #if __ARM_NEON // TODO neon assembly optimize float32x4_t _r0 = vld1q_f32(r0); float32x4_t _sum = vmulq_f32(_r0, _k0123); float32x4_t _r1 = vld1q_f32(r1); _sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1)); float32x4_t _r2 = vld1q_f32(r2); _sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2)); float32x4_t _r3 = vld1q_f32(r3); _sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3)); float32x4_t _r4 = vld1q_f32(r4); _sum = vmlaq_f32(_sum, _r4, _k20212223); sum += r0[4] * k0[4]; sum += r1[4] * k1[4]; sum += r2[4] * k2[4]; sum += r3[4] * k3[4]; sum += r4[4] * k4[4]; float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum)); _ss = vpadd_f32(_ss, _ss); sum += vget_lane_f32(_ss, 0); #else sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r0[3] * k0[3]; sum += r0[4] * k0[4]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r1[3] * k1[3]; sum += r1[4] * k1[4]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; sum += r2[3] * k2[3]; sum += r2[4] * k2[4]; sum += r3[0] * k3[0]; sum += r3[1] * k3[1]; sum += r3[2] * k3[2]; sum += r3[3] * k3[3]; sum += r3[4] * k3[4]; sum += r4[0] * k4[0]; sum += r4[1] * k4[1]; sum += r4[2] * k4[2]; sum += r4[3] * k4[3]; sum += r4[4] * k4[4]; #endif *outptr = sum; r0 += 2; r1 += 2; r2 += 2; r3 += 2; r4 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } }
parallel_utilities.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \
//    _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Denis Demidov
//

#if !defined(KRATOS_PARALLEL_UTILITIES_H_INCLUDED )
#define KRATOS_PARALLEL_UTILITIES_H_INCLUDED

// System includes
#include<iostream>
#include<array>
#include<vector>
#include<tuple>
#include<cmath>
#include<limits>
#include<omp.h>
#include <future>
#include <thread>

#include "utilities/reduction_utilities.h"

// External includes

// Project includes
#include "includes/define.h"
#include "includes/global_variables.h"

namespace Kratos
{
///@addtogroup KratosCore

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @param TContainerType - the type of the container used in the loop (must provide random access iterators)
 * @param TIteratorType - type of iterator (by default as provided by the TContainerType)
 * @param TMaxThreads - maximum number of threads allowed in the partitioning.
 *                      must be known at compile time to avoid heap allocations in the partitioning
 */
template<
    class TContainerType,
    class TIteratorType=typename TContainerType::iterator,
    int TMaxThreads=Globals::MaxAllowedThreads
    >
class BlockPartition
{
public:
    /** @param it_begin - iterator pointing at the beginning of the container
     *  @param it_end - iterator pointing to the end of the container
     *  @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
     */
    BlockPartition(TIteratorType it_begin,
                   TIteratorType it_end,
                   int Nchunks = omp_get_max_threads())
        : mit_begin(it_begin), mit_end(it_end), mNchunks(Nchunks)
    {
        ptrdiff_t mBlockPartitionSize = (it_end-it_begin) / mNchunks;
        mBlockPartition[0] = it_begin;
        mBlockPartition[mNchunks] = it_end;
        for(int i = 1; i < mNchunks; i++)
            mBlockPartition[i] = mBlockPartition[i-1] + mBlockPartitionSize;
    }

    /** @param rData - the container to be iterated upon
     *  @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
     */
    BlockPartition(TContainerType& rData,
                   int Nchunks = omp_get_max_threads())
        : BlockPartition(rData.begin(), rData.end(), Nchunks)
    {}

    virtual ~BlockPartition() {}

    /** @brief simple iteration loop. f called on every entry in rData
     * @param f - must be a unary function accepting as input TContainerType::value_type&
     */
    template <class TUnaryFunction>
    inline void for_each(TUnaryFunction&& f)
    {
        #pragma omp parallel for
        for(int i=0; i<mNchunks; ++i)
        {
            for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it)
            {
                f(*it); //note that we pass the value to the function, not the iterator
            }
        }
    }

    /** @brief loop allowing reductions. f called on every entry in rData
     * the function f needs to return the values to be used by the reducer
     * @param TReducer template parameter specifying the reduction operation to be done
     * @param f - must be a unary function accepting as input TContainerType::value_type&
     */
    template <class TReducer, class TUnaryFunction>
    inline typename TReducer::value_type for_each(TUnaryFunction &&f)
    {
        TReducer global_reducer;
        #pragma omp parallel for
        for(int i=0; i<mNchunks; ++i)
        {
            TReducer local_reducer;
            for (auto it = mBlockPartition[i]; it != mBlockPartition[i+1]; ++it)
            {
                local_reducer.LocalReduce(f(*it));
            }
            global_reducer.ThreadSafeReduce(local_reducer);
        }
        return global_reducer.GetValue();
    }

private:
    TIteratorType mit_begin, mit_end;
    int mNchunks;
    std::array<TIteratorType, TMaxThreads> mBlockPartition;
};

/** @brief simplified version of the basic loop (without reduction) to enable template type deduction
 * @param v - container to be looped upon
 * @param func - must be a unary function accepting as input TContainerType::value_type&
 */
template <class TContainerType, class TFunctionType>
void block_for_each(TContainerType &&v, TFunctionType &&func)
{
    BlockPartition<typename std::decay<TContainerType>::type>(std::forward<TContainerType>(v)).for_each(std::forward<TFunctionType>(func));
}

/** @brief simplified version of the loop with reduction to enable template type deduction
 * @param TReducer template parameter specifying the reduction operation to be done
 * @param v - container to be looped upon
 * @param func - must be a unary function accepting as input TContainerType::value_type&
 */
template <class TReducer, class TContainerType, class TFunctionType>
typename TReducer::value_type block_for_each(TContainerType &&v, TFunctionType &&func)
{
    return BlockPartition<typename std::decay<TContainerType>::type>(std::forward<TContainerType>(v)).template for_each<TReducer>(std::forward<TFunctionType>(func));
}

//***********************************************************************************
//***********************************************************************************
//***********************************************************************************
/** @brief This class is useful for index iteration over containers
 * @param TIndexType type of index to be used in the loop
 * @param TMaxThreads - maximum number of threads allowed in the partitioning.
 *                      must be known at compile time to avoid heap allocations in the partitioning
 */
template<class TIndexType, int TMaxThreads=Globals::MaxAllowedThreads>
class IndexPartition
{
public:
    /** @brief constructor using the size of the partition to be used
     *  @param Size - the size of the partition
     *  @param Nchunks - number of threads to be used in the loop (must be lower than TMaxThreads)
     */
    IndexPartition(TIndexType Size,
                   int Nchunks = omp_get_max_threads())
        : mSize(Size), mNchunks(Nchunks)
    {
        int mBlockPartitionSize = mSize / mNchunks;
        mBlockPartition[0] = 0;
        mBlockPartition[mNchunks] = mSize;
        for(int i = 1; i < mNchunks; i++)
            mBlockPartition[i] = mBlockPartition[i-1] + mBlockPartitionSize;
    }

    virtual ~IndexPartition() {}

    //NOT COMMENTING IN DOXYGEN - THIS SHOULD BE SORT OF HIDDEN UNTIL GIVEN PRIME TIME
    //pure c++11 version (can handle exceptions)
    template <class TUnaryFunction>
    inline void for_pure_c11(TUnaryFunction &&f)
    {
        std::vector< std::future<void> > runners(mNchunks);
        const auto& partition = mBlockPartition;
        for(int i=0; i<mNchunks; ++i)
        {
            runners[i] = std::async(std::launch::async, [&partition, i, &f]()
            {
                for (auto k = partition[i]; k < partition[i+1]; ++k)
                    f(k);
            });
        }

        //here we impose a synchronization and we check the exceptions
        for(int i=0; i<mNchunks; ++i)
        {
            try {
                runners[i].get();
            }
            catch(Exception& e) {
                KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl;
            } catch(std::exception& e) {
                KRATOS_ERROR << std::endl << "THREAD number: " << i << " caught exception " << e.what() << std::endl;
            } catch(...) {
                KRATOS_ERROR << std::endl << "unknown error" << std::endl;
            }
        }
    }

    /** simple version of for_each (no reduction) to be called for each index in the partition
     * @param f - must be a unary function accepting as input IndexType
     */
    template <class TUnaryFunction>
    inline void for_each(TUnaryFunction &&f)
    {
        #pragma omp parallel for
        for(int i=0; i<mNchunks; ++i)
        {
            for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k)
            {
                f(k); //note that we pass the index k to the function, not an iterator
            }
        }
    }

    /** version with reduction to be called for each index in the partition
     * function f is expected to return the values to be reduced
     * @param TReducer - template parameter specifying the type of reducer to be applied
     * @param f - must be a unary function accepting as input IndexType
     */
    template <class TReducer, class TUnaryFunction>
    inline typename TReducer::value_type for_each(TUnaryFunction &&f)
    {
        TReducer global_reducer;
        #pragma omp parallel for
        for(int i=0; i<mNchunks; ++i)
        {
            TReducer local_reducer;
            for (auto k = mBlockPartition[i]; k < mBlockPartition[i+1]; ++k)
            {
                local_reducer.LocalReduce(f(k));
            }
            global_reducer.ThreadSafeReduce(local_reducer);
        }
        return global_reducer.GetValue();
    }

private:
    TIndexType mSize;
    int mNchunks;
    std::array<TIndexType, TMaxThreads> mBlockPartition;
};

} // namespace Kratos.

#endif // KRATOS_PARALLEL_UTILITIES_H_INCLUDED  defined
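// Editor's note (illustrative, not part of the header above): a minimal usage
// sketch of the two utilities, kept entirely in comments so the dump stays
// compilable. It assumes a reducer such as SumReduction<double> is available
// from "utilities/reduction_utilities.h" exposing the value_type /
// LocalReduce / ThreadSafeReduce / GetValue interface that for_each<TReducer>
// relies on; the reducer name is an assumption, not verified here.
//
//   std::vector<double> data(1000, 1.0);
//
//   // element-wise parallel loop: func receives a reference to each entry
//   Kratos::block_for_each(data, [](double& r){ r *= 2.0; });
//
//   // index-based parallel loop with a reduction: f returns the value to reduce
//   const double total = Kratos::IndexPartition<std::size_t>(data.size())
//       .for_each<Kratos::SumReduction<double>>([&](std::size_t k){ return data[k]; });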
csr.c
/*!
 * \file
 *
 * \brief Various routines for dealing with CSR matrices
 *
 * \author George Karypis
 * \version\verbatim $Id: csr.c 13437 2013-01-11 21:54:10Z karypis $ \endverbatim
 */

#include <GKlib.h>

#define OMPMINOPS 50000

/*************************************************************************/
/*! Allocates memory for a CSR matrix and initializes it
    \returns the allocated matrix. The various fields are set to NULL.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Create()
{
  gk_csr_t *mat;

  mat = (gk_csr_t *)gk_malloc(sizeof(gk_csr_t), "gk_csr_Create: mat");

  gk_csr_Init(mat);

  return mat;
}


/*************************************************************************/
/*! Initializes the matrix
    \param mat is the matrix to be initialized.
*/
/*************************************************************************/
void gk_csr_Init(gk_csr_t *mat)
{
  memset(mat, 0, sizeof(gk_csr_t));
  mat->nrows = mat->ncols = -1;
}


/*************************************************************************/
/*! Frees all the memory allocated for matrix.
    \param mat is the matrix to be freed.
*/
/*************************************************************************/
void gk_csr_Free(gk_csr_t **mat)
{
  if (*mat == NULL)
    return;
  gk_csr_FreeContents(*mat);
  gk_free((void **)mat, LTERM);
}


/*************************************************************************/
/*! Frees only the memory allocated for the matrix's different fields and
    sets them to NULL.
    \param mat is the matrix whose contents will be freed.
*/
/*************************************************************************/
void gk_csr_FreeContents(gk_csr_t *mat)
{
  gk_free((void *)&mat->rowptr, &mat->rowind, &mat->rowval, &mat->rowids,
          &mat->colptr, &mat->colind, &mat->colval, &mat->colids,
          &mat->rnorms, &mat->cnorms, &mat->rsums, &mat->csums,
          &mat->rsizes, &mat->csizes, &mat->rvols, &mat->cvols,
          &mat->rwgts, &mat->cwgts,
          LTERM);
}


/*************************************************************************/
/*! Returns a copy of a matrix.
    \param mat is the matrix to be duplicated.
    \returns the newly created copy of the matrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_Dup(gk_csr_t *mat)
{
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = mat->nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(mat->nrows+1, mat->rowptr,
                            gk_zmalloc(mat->nrows+1, "gk_csr_Dup: rowptr"));
  if (mat->rowids)
    nmat->rowids = gk_icopy(mat->nrows, mat->rowids,
                            gk_imalloc(mat->nrows, "gk_csr_Dup: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(mat->nrows, mat->rnorms,
                            gk_fmalloc(mat->nrows, "gk_csr_Dup: rnorms"));
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[mat->nrows], mat->rowind,
                            gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[mat->nrows], mat->rowval,
                            gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Dup: rowval"));

  /* copy the col structure */
  if (mat->colptr)
    nmat->colptr = gk_zcopy(mat->ncols+1, mat->colptr,
                            gk_zmalloc(mat->ncols+1, "gk_csr_Dup: colptr"));
  if (mat->colids)
    nmat->colids = gk_icopy(mat->ncols, mat->colids,
                            gk_imalloc(mat->ncols, "gk_csr_Dup: colids"));
  if (mat->cnorms)
    nmat->cnorms = gk_fcopy(mat->ncols, mat->cnorms,
                            gk_fmalloc(mat->ncols, "gk_csr_Dup: cnorms"));
  if (mat->colind)
    nmat->colind = gk_icopy(mat->colptr[mat->ncols], mat->colind,
                            gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colind"));
  if (mat->colval)
    nmat->colval = gk_fcopy(mat->colptr[mat->ncols], mat->colval,
                            gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Dup: colval"));

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a set of consecutive rows.
    \param mat is the original matrix.
    \param rstart is the starting row.
    \param nrows is the number of rows from rstart to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractSubmatrix(gk_csr_t *mat, int rstart, int nrows)
{
  ssize_t i;
  gk_csr_t *nmat;

  if (rstart+nrows > mat->nrows)
    return NULL;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  /* copy the row structure */
  if (mat->rowptr)
    nmat->rowptr = gk_zcopy(nrows+1, mat->rowptr+rstart,
                            gk_zmalloc(nrows+1, "gk_csr_ExtractSubmatrix: rowptr"));
  for (i=nrows; i>=0; i--)
    nmat->rowptr[i] -= nmat->rowptr[0];
  ASSERT(nmat->rowptr[0] == 0);

  if (mat->rowids)
    nmat->rowids = gk_icopy(nrows, mat->rowids+rstart,
                            gk_imalloc(nrows, "gk_csr_ExtractSubmatrix: rowids"));
  if (mat->rnorms)
    nmat->rnorms = gk_fcopy(nrows, mat->rnorms+rstart,
                            gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rnorms"));
  if (mat->rsums)
    nmat->rsums = gk_fcopy(nrows, mat->rsums+rstart,
                           gk_fmalloc(nrows, "gk_csr_ExtractSubmatrix: rsums"));

  ASSERT(nmat->rowptr[nrows] == mat->rowptr[rstart+nrows]-mat->rowptr[rstart]);
  if (mat->rowind)
    nmat->rowind = gk_icopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowind+mat->rowptr[rstart],
                            gk_imalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowind"));
  if (mat->rowval)
    nmat->rowval = gk_fcopy(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                            mat->rowval+mat->rowptr[rstart],
                            gk_fmalloc(mat->rowptr[rstart+nrows]-mat->rowptr[rstart],
                                       "gk_csr_ExtractSubmatrix: rowval"));

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix containing a certain set of rows.
    \param mat is the original matrix.
    \param nrows is the number of rows to extract.
    \param rind is the set of row numbers to extract.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractRows(gk_csr_t *mat, int nrows, int *rind)
{
  ssize_t i, ii, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = nrows;
  nmat->ncols = mat->ncols;

  for (nnz=0, i=0; i<nrows; i++)
    nnz += mat->rowptr[rind[i]+1]-mat->rowptr[rind[i]];

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractRows: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractRows: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractRows: rowval");

  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, ii=0; ii<nrows; ii++) {
    i = rind[ii];
    gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
    gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
    nnz += mat->rowptr[i+1]-mat->rowptr[i];
    nmat->rowptr[++j] = nnz;
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}


/*************************************************************************/
/*! Returns a submatrix corresponding to a specified partitioning of rows.
    \param mat is the original matrix.
    \param part is the partitioning vector of the rows.
    \param pid is the partition ID that will be extracted.
    \returns the row structure of the newly created submatrix.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ExtractPartition(gk_csr_t *mat, int *part, int pid)
{
  ssize_t i, j, nnz;
  gk_csr_t *nmat;

  nmat = gk_csr_Create();

  nmat->nrows = 0;
  nmat->ncols = mat->ncols;

  for (nnz=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      nmat->nrows++;
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
    }
  }

  nmat->rowptr = gk_zmalloc(nmat->nrows+1, "gk_csr_ExtractPartition: rowptr");
  nmat->rowind = gk_imalloc(nnz, "gk_csr_ExtractPartition: rowind");
  nmat->rowval = gk_fmalloc(nnz, "gk_csr_ExtractPartition: rowval");

  nmat->rowptr[0] = 0;
  for (nnz=0, j=0, i=0; i<mat->nrows; i++) {
    if (part[i] == pid) {
      gk_icopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowind+mat->rowptr[i], nmat->rowind+nnz);
      gk_fcopy(mat->rowptr[i+1]-mat->rowptr[i], mat->rowval+mat->rowptr[i], nmat->rowval+nnz);
      nnz += mat->rowptr[i+1]-mat->rowptr[i];
      nmat->rowptr[++j] = nnz;
    }
  }
  ASSERT(j == nmat->nrows);

  return nmat;
}


/*************************************************************************/
/*! Splits the matrix into multiple sub-matrices based on the provided
    color array.
    \param mat is the original matrix.
    \param color is an array of size equal to the number of non-zeros
           in the matrix (row-wise structure). The matrix is split into
           as many parts as the number of colors. For meaningful results,
           the colors should be numbered consecutively starting from 0.
    \returns an array of matrices for each supplied color number.
*/
/**************************************************************************/
gk_csr_t **gk_csr_Split(gk_csr_t *mat, int *color)
{
  ssize_t i, j;
  int nrows, ncolors;
  ssize_t *rowptr;
  int *rowind;
  float *rowval;
  gk_csr_t **smats;

  nrows  = mat->nrows;
  rowptr = mat->rowptr;
  rowind = mat->rowind;
  rowval = mat->rowval;

  ncolors = gk_imax(rowptr[nrows], color)+1;

  smats = (gk_csr_t **)gk_malloc(sizeof(gk_csr_t *)*ncolors, "gk_csr_Split: smats");
  for (i=0; i<ncolors; i++) {
    smats[i] = gk_csr_Create();
    smats[i]->nrows  = mat->nrows;
    smats[i]->ncols  = mat->ncols;
    smats[i]->rowptr = gk_zsmalloc(nrows+1, 0, "gk_csr_Split: smats[i]->rowptr");
  }

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++)
      smats[color[j]]->rowptr[i]++;
  }
  for (i=0; i<ncolors; i++)
    MAKECSR(j, nrows, smats[i]->rowptr);

  for (i=0; i<ncolors; i++) {
    smats[i]->rowind = gk_imalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowind");
    smats[i]->rowval = gk_fmalloc(smats[i]->rowptr[nrows], "gk_csr_Split: smats[i]->rowval");
  }

  for (i=0; i<nrows; i++) {
    for (j=rowptr[i]; j<rowptr[i+1]; j++) {
      smats[color[j]]->rowind[smats[color[j]]->rowptr[i]] = rowind[j];
      smats[color[j]]->rowval[smats[color[j]]->rowptr[i]] = rowval[j];
      smats[color[j]]->rowptr[i]++;
    }
  }
  for (i=0; i<ncolors; i++)
    SHIFTCSR(j, nrows, smats[i]->rowptr);

  return smats;
}


/**************************************************************************/
/*! Reads a CSR matrix from the supplied file and stores it in the matrix's
    forward structure.
    \param filename is the file that stores the data.
    \param format is either GK_CSR_FMT_METIS, GK_CSR_FMT_CLUTO,
           GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL
           specifying the type of the input format.
           The GK_CSR_FMT_CSR does not contain a header line, whereas the
           GK_CSR_FMT_BINROW is a binary format written by gk_csr_Write()
           using the same format specifier.
    \param readvals is either 1 or 0, indicating if the CSR file contains
           values or it does not. It only applies when GK_CSR_FMT_CSR is used.
    \param numbering is either 1 or 0, indicating if the numbering of the
           indices start from 1 or 0, respectively. If they start from 1,
           they are automatically decremented during input so that they will
           start from 0. It only applies when GK_CSR_FMT_CSR is used.
    \returns the matrix that was read.
*/ /**************************************************************************/ gk_csr_t *gk_csr_Read(char *filename, int format, int readvals, int numbering) { ssize_t i, k, l; size_t nfields, nrows, ncols, nnz, fmt, ncon; size_t lnlen; ssize_t *rowptr; int *rowind, ival; float *rowval=NULL, fval; int readsizes, readwgts; char *line=NULL, *head, *tail, fmtstr[256]; FILE *fpin; gk_csr_t *mat=NULL; if (!gk_fexists(filename)) gk_errexit(SIGERR, "File %s does not exist!\n", filename); if (format == GK_CSR_FMT_BINROW) { mat = gk_csr_Create(); fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin"); if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename); if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename); mat->rowptr = gk_zmalloc(mat->nrows+1, "gk_csr_Read: rowptr"); if (fread(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpin) != mat->nrows+1) gk_errexit(SIGERR, "Failed to read the rowptr from file %s!\n", filename); mat->rowind = gk_imalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowind"); if (fread(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows]) gk_errexit(SIGERR, "Failed to read the rowind from file %s!\n", filename); if (readvals == 1) { mat->rowval = gk_fmalloc(mat->rowptr[mat->nrows], "gk_csr_Read: rowval"); if (fread(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpin) != mat->rowptr[mat->nrows]) gk_errexit(SIGERR, "Failed to read the rowval from file %s!\n", filename); } gk_fclose(fpin); return mat; } if (format == GK_CSR_FMT_BINCOL) { mat = gk_csr_Create(); fpin = gk_fopen(filename, "rb", "gk_csr_Read: fpin"); if (fread(&(mat->nrows), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the nrows from file %s!\n", filename); if (fread(&(mat->ncols), sizeof(int32_t), 1, fpin) != 1) gk_errexit(SIGERR, "Failed to read the ncols from file %s!\n", filename); mat->colptr = gk_zmalloc(mat->ncols+1, "gk_csr_Read: colptr"); if (fread(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpin) != mat->ncols+1) gk_errexit(SIGERR, "Failed to read the colptr from file %s!\n", filename); mat->colind = gk_imalloc(mat->colptr[mat->ncols], "gk_csr_Read: colind"); if (fread(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols]) gk_errexit(SIGERR, "Failed to read the colind from file %s!\n", filename); if (readvals) { mat->colval = gk_fmalloc(mat->colptr[mat->ncols], "gk_csr_Read: colval"); if (fread(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpin) != mat->colptr[mat->ncols]) gk_errexit(SIGERR, "Failed to read the colval from file %s!\n", filename); } gk_fclose(fpin); return mat; } if (format == GK_CSR_FMT_CLUTO) { fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); if (sscanf(line, "%zu %zu %zu", &nrows, &ncols, &nnz) != 3) gk_errexit(SIGERR, "Header line must contain 3 integers.\n"); readsizes = 0; readwgts = 0; readvals = 1; numbering = 1; } else if (format == GK_CSR_FMT_METIS) { fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); do { if (gk_getline(&line, &lnlen, fpin) <= 0) gk_errexit(SIGERR, "Premature end of input file: file:%s\n", filename); } while (line[0] == '%'); fmt = ncon = 0; nfields = sscanf(line, "%zu %zu %zu %zu", &nrows, &nnz, &fmt, &ncon); if (nfields < 2) gk_errexit(SIGERR, "Header line must contain 
at least 2 integers (#vtxs and #edges).\n"); ncols = nrows; nnz *= 2; if (fmt > 111) gk_errexit(SIGERR, "Cannot read this type of file format [fmt=%zu]!\n", fmt); sprintf(fmtstr, "%03zu", fmt%1000); readsizes = (fmtstr[0] == '1'); readwgts = (fmtstr[1] == '1'); readvals = (fmtstr[2] == '1'); numbering = 1; ncon = (ncon == 0 ? 1 : ncon); } else { readsizes = 0; readwgts = 0; gk_getfilestats(filename, &nrows, &nnz, NULL, NULL); if (readvals == 1 && nnz%2 == 1) gk_errexit(SIGERR, "Error: The number of numbers (%zd %d) in the input file is not even.\n", nnz, readvals); if (readvals == 1) nnz = nnz/2; fpin = gk_fopen(filename, "r", "gk_csr_Read: fpin"); } mat = gk_csr_Create(); mat->nrows = nrows; rowptr = mat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Read: rowptr"); rowind = mat->rowind = gk_imalloc(nnz, "gk_csr_Read: rowind"); if (readvals != 2) rowval = mat->rowval = gk_fsmalloc(nnz, 1.0, "gk_csr_Read: rowval"); if (readsizes) mat->rsizes = gk_fsmalloc(nrows, 0.0, "gk_csr_Read: rsizes"); if (readwgts) mat->rwgts = gk_fsmalloc(nrows*ncon, 0.0, "gk_csr_Read: rwgts"); /*---------------------------------------------------------------------- * Read the sparse matrix file *---------------------------------------------------------------------*/ numbering = (numbering ? - 1 : 0); for (ncols=0, rowptr[0]=0, k=0, i=0; i<nrows; i++) { do { if (gk_getline(&line, &lnlen, fpin) == -1) gk_errexit(SIGERR, "Premature end of input file: file while reading row %d\n", i); } while (line[0] == '%'); head = line; tail = NULL; /* Read vertex sizes */ if (readsizes) { #ifdef __MSC__ mat->rsizes[i] = (float)strtod(head, &tail); #else mat->rsizes[i] = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "The line for vertex %zd does not have size information\n", i+1); if (mat->rsizes[i] < 0) errexit("The size for vertex %zd must be >= 0\n", i+1); head = tail; } /* Read vertex weights */ if (readwgts) { for (l=0; l<ncon; l++) { #ifdef __MSC__ mat->rwgts[i*ncon+l] = (float)strtod(head, &tail); #else mat->rwgts[i*ncon+l] = strtof(head, &tail); #endif if (tail == head) errexit("The line for vertex %zd does not have enough weights " "for the %d constraints.\n", i+1, ncon); if (mat->rwgts[i*ncon+l] < 0) errexit("The weight vertex %zd and constraint %zd must be >= 0\n", i+1, l); head = tail; } } /* Read the rest of the row */ while (1) { ival = (int)strtol(head, &tail, 0); if (tail == head) break; head = tail; if ((rowind[k] = ival + numbering) < 0) gk_errexit(SIGERR, "Error: Invalid column number %d at row %zd.\n", ival, i); ncols = gk_max(rowind[k], ncols); if (readvals == 1) { #ifdef __MSC__ fval = (float)strtod(head, &tail); #else fval = strtof(head, &tail); #endif if (tail == head) gk_errexit(SIGERR, "Value could not be found for column! Row:%zd, NNZ:%zd\n", i, k); head = tail; rowval[k] = fval; } k++; } rowptr[i+1] = k; } if (format == GK_CSR_FMT_METIS) { ASSERT(ncols+1 == mat->nrows); mat->ncols = mat->nrows; } else { mat->ncols = ncols+1; } if (k != nnz) gk_errexit(SIGERR, "gk_csr_Read: Something wrong with the number of nonzeros in " "the input file. NNZ=%zd, ActualNNZ=%zd.\n", nnz, k); gk_fclose(fpin); gk_free((void **)&line, LTERM); return mat; } /**************************************************************************/ /*! Writes the row-based structure of a matrix into a file. \param mat is the matrix to be written, \param filename is the name of the output file. \param format is one of: GK_CSR_FMT_CLUTO, GK_CSR_FMT_CSR, GK_CSR_FMT_BINROW, GK_CSR_FMT_BINCOL. 
\param writevals is either 1 or 0 indicating if the values will be written or not. This is only applicable when GK_CSR_FMT_CSR is used. \param numbering is either 1 or 0 indicating if the internal 0-based numbering will be shifted by one or not during output. This is only applicable when GK_CSR_FMT_CSR is used. */ /**************************************************************************/ void gk_csr_Write(gk_csr_t *mat, char *filename, int format, int writevals, int numbering) { ssize_t i, j; FILE *fpout; if (format == GK_CSR_FMT_BINROW) { if (filename == NULL) gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n"); fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout"); fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout); fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout); fwrite(mat->rowptr, sizeof(ssize_t), mat->nrows+1, fpout); fwrite(mat->rowind, sizeof(int32_t), mat->rowptr[mat->nrows], fpout); if (writevals) fwrite(mat->rowval, sizeof(float), mat->rowptr[mat->nrows], fpout); gk_fclose(fpout); return; } if (format == GK_CSR_FMT_BINCOL) { if (filename == NULL) gk_errexit(SIGERR, "The filename parameter cannot be NULL.\n"); fpout = gk_fopen(filename, "wb", "gk_csr_Write: fpout"); fwrite(&(mat->nrows), sizeof(int32_t), 1, fpout); fwrite(&(mat->ncols), sizeof(int32_t), 1, fpout); fwrite(mat->colptr, sizeof(ssize_t), mat->ncols+1, fpout); fwrite(mat->colind, sizeof(int32_t), mat->colptr[mat->ncols], fpout); if (writevals) fwrite(mat->colval, sizeof(float), mat->colptr[mat->ncols], fpout); gk_fclose(fpout); return; } if (filename) fpout = gk_fopen(filename, "w", "gk_csr_Write: fpout"); else fpout = stdout; if (format == GK_CSR_FMT_CLUTO) { fprintf(fpout, "%d %d %zd\n", mat->nrows, mat->ncols, mat->rowptr[mat->nrows]); writevals = 1; numbering = 1; } for (i=0; i<mat->nrows; i++) { for (j=mat->rowptr[i]; j<mat->rowptr[i+1]; j++) { fprintf(fpout, " %d", mat->rowind[j]+(numbering ? 1 : 0)); if (writevals) fprintf(fpout, " %f", mat->rowval[j]); } fprintf(fpout, "\n"); } if (filename) gk_fclose(fpout); } /*************************************************************************/ /*! Prunes certain rows/columns of the matrix. The prunning takes place by analyzing the row structure of the matrix. The prunning takes place by removing rows/columns but it does not affect the numbering of the remaining rows/columns. \param mat the matrix to be prunned, \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL) of the matrix will be prunned, \param minf is the minimum number of rows (columns) that a column (row) must be present in order to be kept, \param maxf is the maximum number of rows (columns) that a column (row) must be present at in order to be kept. \returns the prunned matrix consisting only of its row-based structure. The input matrix is not modified. 
*/ /**************************************************************************/ gk_csr_t *gk_csr_Prune(gk_csr_t *mat, int what, int minf, int maxf) { ssize_t i, j, nnz; int nrows, ncols; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind, *collen; float *rowval, *nrowval; gk_csr_t *nmat; nmat = gk_csr_Create(); nrows = nmat->nrows = mat->nrows; ncols = nmat->ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_Prune: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_Prune: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_Prune: nrowval"); switch (what) { case GK_CSR_COL: collen = gk_ismalloc(ncols, 0, "gk_csr_Prune: collen"); for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { ASSERT(rowind[j] < ncols); collen[rowind[j]]++; } } for (i=0; i<ncols; i++) collen[i] = (collen[i] >= minf && collen[i] <= maxf ? 1 : 0); nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (collen[rowind[j]]) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } gk_free((void **)&collen, LTERM); break; case GK_CSR_ROW: nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { if (rowptr[i+1]-rowptr[i] >= minf && rowptr[i+1]-rowptr[i] <= maxf) { for (j=rowptr[i]; j<rowptr[i+1]; j++, nnz++) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; } } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Eliminates certain entries from the rows/columns of the matrix. The filtering takes place by keeping only the highest weight entries whose sum accounts for a certain fraction of the overall weight of the row/column. \param mat the matrix to be prunned, \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL) of the matrix will be prunned, \param norm indicates the norm that will be used to aggregate the weights and possible values are 1 or 2, \param fraction is the fraction of the overall norm that will be retained by the kept entries. \returns the filtered matrix consisting only of its row-based structure. The input matrix is not modified. 
*/ /**************************************************************************/ gk_csr_t *gk_csr_LowFilter(gk_csr_t *mat, int what, int norm, float fraction) { ssize_t i, j, nnz; int nrows, ncols, ncand, maxlen=0; ssize_t *rowptr, *colptr, *nrowptr; int *rowind, *colind, *nrowind; float *rowval, *colval, *nrowval, rsum, tsum; gk_csr_t *nmat; gk_fkv_t *cand; nmat = gk_csr_Create(); nrows = nmat->nrows = mat->nrows; ncols = nmat->ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; colptr = mat->colptr; colind = mat->colind; colval = mat->colval; nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_LowFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_LowFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_LowFilter: nrowval"); switch (what) { case GK_CSR_COL: if (mat->colptr == NULL) gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n"); gk_zcopy(nrows+1, rowptr, nrowptr); for (i=0; i<ncols; i++) maxlen = gk_max(maxlen, colptr[i+1]-colptr[i]); #pragma omp parallel private(i, j, ncand, rsum, tsum, cand) { cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand"); #pragma omp for schedule(static) for (i=0; i<ncols; i++) { for (tsum=0.0, ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) { cand[ncand].val = colind[j]; cand[ncand].key = colval[j]; tsum += (norm == 1 ? colval[j] : colval[j]*colval[j]); } gk_fkvsortd(ncand, cand); for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) { rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key); nrowind[nrowptr[cand[j].val]] = i; nrowval[nrowptr[cand[j].val]] = cand[j].key; nrowptr[cand[j].val]++; } } gk_free((void **)&cand, LTERM); } /* compact the nrowind/nrowval */ for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) { nrowind[nnz] = nrowind[j]; nrowval[nnz] = nrowval[j]; } nrowptr[i] = nnz; } SHIFTCSR(i, nrows, nrowptr); break; case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n"); for (i=0; i<nrows; i++) maxlen = gk_max(maxlen, rowptr[i+1]-rowptr[i]); #pragma omp parallel private(i, j, ncand, rsum, tsum, cand) { cand = gk_fkvmalloc(maxlen, "gk_csr_LowFilter: cand"); #pragma omp for schedule(static) for (i=0; i<nrows; i++) { for (tsum=0.0, ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) { cand[ncand].val = rowind[j]; cand[ncand].key = rowval[j]; tsum += (norm == 1 ? rowval[j] : rowval[j]*rowval[j]); } gk_fkvsortd(ncand, cand); for (rsum=0.0, j=0; j<ncand && rsum<=fraction*tsum; j++) { rsum += (norm == 1 ? cand[j].key : cand[j].key*cand[j].key); nrowind[rowptr[i]+j] = cand[j].val; nrowval[rowptr[i]+j] = cand[j].key; } nrowptr[i+1] = rowptr[i]+j; } gk_free((void **)&cand, LTERM); } /* compact nrowind/nrowval */ nrowptr[0] = nnz = 0; for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<nrowptr[i+1]; j++, nnz++) { nrowind[nnz] = nrowind[j]; nrowval[nnz] = nrowval[j]; } nrowptr[i+1] = nnz; } break; default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown prunning type of %d\n", what); return NULL; } return nmat; } /*************************************************************************/ /*! Eliminates certain entries from the rows/columns of the matrix. The filtering takes place by keeping only the highest weight top-K entries along each row/column and those entries whose weight is greater than a specified value. 
\param mat the matrix to be pruned, \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL) of the matrix will be pruned, \param topk is the number of the highest weight entries to keep. \param keepval is the weight above which a term will be kept. This is used to select additional terms past the first topk. \returns the filtered matrix consisting only of its row-based structure. The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_TopKPlusFilter(gk_csr_t *mat, int what, int topk, float keepval) { ssize_t i, j, k, nnz; int nrows, ncols, ncand; ssize_t *rowptr, *colptr, *nrowptr; int *rowind, *colind, *nrowind; float *rowval, *colval, *nrowval; gk_csr_t *nmat; gk_fkv_t *cand;
nmat = gk_csr_Create(); nrows = nmat->nrows = mat->nrows; ncols = nmat->ncols = mat->ncols;
rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; colptr = mat->colptr; colind = mat->colind; colval = mat->colval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_TopKPlusFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_TopKPlusFilter: nrowval");
switch (what) { case GK_CSR_COL: if (mat->colptr == NULL) gk_errexit(SIGERR, "Cannot filter columns when column-based structure has not been created.\n");
cand = gk_fkvmalloc(nrows, "gk_csr_TopKPlusFilter: cand");
gk_zcopy(nrows+1, rowptr, nrowptr); for (i=0; i<ncols; i++) { for (ncand=0, j=colptr[i]; j<colptr[i+1]; j++, ncand++) { cand[ncand].val = colind[j]; cand[ncand].key = colval[j]; } gk_fkvsortd(ncand, cand);
k = gk_min(topk, ncand); for (j=0; j<k; j++) { nrowind[nrowptr[cand[j].val]] = i; nrowval[nrowptr[cand[j].val]] = cand[j].key; nrowptr[cand[j].val]++; } for (; j<ncand; j++) { if (cand[j].key < keepval) break; nrowind[nrowptr[cand[j].val]] = i; nrowval[nrowptr[cand[j].val]] = cand[j].key; nrowptr[cand[j].val]++; } }
/* compact the nrowind/nrowval */ for (nnz=0, i=0; i<nrows; i++) { for (j=rowptr[i]; j<nrowptr[i]; j++, nnz++) { nrowind[nnz] = nrowind[j]; nrowval[nnz] = nrowval[j]; } nrowptr[i] = nnz; } SHIFTCSR(i, nrows, nrowptr);
gk_free((void **)&cand, LTERM);
break;
case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
cand = gk_fkvmalloc(ncols, "gk_csr_TopKPlusFilter: cand");
nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { for (ncand=0, j=rowptr[i]; j<rowptr[i+1]; j++, ncand++) { cand[ncand].val = rowind[j]; cand[ncand].key = rowval[j]; } gk_fkvsortd(ncand, cand);
k = gk_min(topk, ncand); for (j=0; j<k; j++, nnz++) { nrowind[nnz] = cand[j].val; nrowval[nnz] = cand[j].key; } for (; j<ncand; j++, nnz++) { if (cand[j].key < keepval) break; nrowind[nnz] = cand[j].val; nrowval[nnz] = cand[j].key; } nrowptr[i+1] = nnz; } gk_free((void **)&cand, LTERM);
break;
default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown pruning type of %d\n", what); return NULL; }
return nmat; }
/*************************************************************************/
/*! Eliminates certain entries from the rows/columns of the matrix. The filtering takes place by keeping only the terms whose contribution to the total length of the document is greater than a user-supplied multiple over the average. This routine assumes that the vectors are normalized to be unit length.
\param mat the matrix to be pruned, \param what indicates if the rows (GK_CSR_ROW) or the columns (GK_CSR_COL) of the matrix will be pruned, \param zscore is the multiplicative factor over the average contribution to the length of the document. \returns the filtered matrix consisting only of its row-based structure. The input matrix is not modified.
*/
/**************************************************************************/
gk_csr_t *gk_csr_ZScoreFilter(gk_csr_t *mat, int what, float zscore) { ssize_t i, j, nnz; int nrows; ssize_t *rowptr, *nrowptr; int *rowind, *nrowind; float *rowval, *nrowval, avgwgt; gk_csr_t *nmat;
nmat = gk_csr_Create(); nmat->nrows = mat->nrows; nmat->ncols = mat->ncols;
nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval;
nrowptr = nmat->rowptr = gk_zmalloc(nrows+1, "gk_csr_ZScoreFilter: nrowptr"); nrowind = nmat->rowind = gk_imalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowind"); nrowval = nmat->rowval = gk_fmalloc(rowptr[nrows], "gk_csr_ZScoreFilter: nrowval");
switch (what) { case GK_CSR_COL: gk_errexit(SIGERR, "This has not been implemented yet.\n"); break;
case GK_CSR_ROW: if (mat->rowptr == NULL) gk_errexit(SIGERR, "Cannot filter rows when row-based structure has not been created.\n");
nrowptr[0] = 0; for (nnz=0, i=0; i<nrows; i++) { avgwgt = zscore/(rowptr[i+1]-rowptr[i]); for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] > avgwgt) { nrowind[nnz] = rowind[j]; nrowval[nnz] = rowval[j]; nnz++; } } nrowptr[i+1] = nnz; } break;
default: gk_csr_Free(&nmat); gk_errexit(SIGERR, "Unknown pruning type of %d\n", what); return NULL; }
return nmat; }
/*************************************************************************/
/*! Compacts the column-space of the matrix by removing empty columns. As a result of the compaction, the column numbers are renumbered. The compaction operation is done in place and only affects the row-based representation of the matrix. The new columns are ordered in decreasing frequency.
\param mat the matrix whose empty columns will be removed.
*/
/**************************************************************************/
void gk_csr_CompactColumns(gk_csr_t *mat) { ssize_t i; int nrows, ncols, nncols; ssize_t *rowptr; int *rowind, *colmap; gk_ikv_t *clens;
nrows = mat->nrows; ncols = mat->ncols; rowptr = mat->rowptr; rowind = mat->rowind;
colmap = gk_imalloc(ncols, "gk_csr_CompactColumns: colmap");
clens = gk_ikvmalloc(ncols, "gk_csr_CompactColumns: clens"); for (i=0; i<ncols; i++) { clens[i].key = 0; clens[i].val = i; }
for (i=0; i<rowptr[nrows]; i++) clens[rowind[i]].key++; gk_ikvsortd(ncols, clens);
for (nncols=0, i=0; i<ncols; i++) { if (clens[i].key > 0) colmap[clens[i].val] = nncols++; else break; }
for (i=0; i<rowptr[nrows]; i++) rowind[i] = colmap[rowind[i]];
mat->ncols = nncols;
gk_free((void **)&colmap, &clens, LTERM); }
/*************************************************************************/
/*! Sorts the indices in increasing order \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which set of indices to sort.
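Example (a hedged sketch added in editing; illustrative only):
\verbatim
  // ensure ascending rowind[] within each row, as required by routines such
  // as gk_csr_ComputeSimilarity() that merge sorted index lists
  gk_csr_SortIndices(mat, GK_CSR_ROW);
\endverbatim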
*/
/**************************************************************************/
void gk_csr_SortIndices(gk_csr_t *mat, int what) { int n, nn=0; ssize_t *ptr; int *ind; float *val;
switch (what) { case GK_CSR_ROW: if (!mat->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exist.\n"); n = mat->nrows; ptr = mat->rowptr; ind = mat->rowind; val = mat->rowval; break;
case GK_CSR_COL: if (!mat->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exist.\n"); n = mat->ncols; ptr = mat->colptr; ind = mat->colind; val = mat->colval; break;
default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; }
#pragma omp parallel if (n > 100) { ssize_t i, j, k; gk_ikv_t *cand; float *tval;
#pragma omp single for (i=0; i<n; i++) nn = gk_max(nn, ptr[i+1]-ptr[i]);
cand = gk_ikvmalloc(nn, "gk_csr_SortIndices: cand"); tval = gk_fmalloc(nn, "gk_csr_SortIndices: tval");
#pragma omp for schedule(static) for (i=0; i<n; i++) { for (k=0, j=ptr[i]; j<ptr[i+1]; j++) { if (j > ptr[i] && ind[j] < ind[j-1]) k = 1; /* an inversion */ cand[j-ptr[i]].val = j-ptr[i]; cand[j-ptr[i]].key = ind[j]; tval[j-ptr[i]] = val[j]; } if (k) { gk_ikvsorti(ptr[i+1]-ptr[i], cand); for (j=ptr[i]; j<ptr[i+1]; j++) { ind[j] = cand[j-ptr[i]].key; val[j] = tval[cand[j-ptr[i]].val]; } } }
gk_free((void **)&cand, &tval, LTERM); } }
/*************************************************************************/
/*! Creates a row/column index from the column/row data. \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which index will be created.
*/
/**************************************************************************/
void gk_csr_CreateIndex(gk_csr_t *mat, int what) { /* 'f' stands for forward, 'r' stands for reverse */ ssize_t i, j, k, nf, nr; ssize_t *fptr, *rptr; int *find, *rind; float *fval, *rval;
switch (what) { case GK_CSR_COL: nf = mat->nrows; fptr = mat->rowptr; find = mat->rowind; fval = mat->rowval;
if (mat->colptr) gk_free((void **)&mat->colptr, LTERM); if (mat->colind) gk_free((void **)&mat->colind, LTERM); if (mat->colval) gk_free((void **)&mat->colval, LTERM);
nr = mat->ncols; rptr = mat->colptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr"); rind = mat->colind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind"); rval = mat->colval = (fval ? gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL); break;
case GK_CSR_ROW: nf = mat->ncols; fptr = mat->colptr; find = mat->colind; fval = mat->colval;
if (mat->rowptr) gk_free((void **)&mat->rowptr, LTERM); if (mat->rowind) gk_free((void **)&mat->rowind, LTERM); if (mat->rowval) gk_free((void **)&mat->rowval, LTERM);
nr = mat->nrows; rptr = mat->rowptr = gk_zsmalloc(nr+1, 0, "gk_csr_CreateIndex: rptr"); rind = mat->rowind = gk_imalloc(fptr[nf], "gk_csr_CreateIndex: rind"); rval = mat->rowval = (fval ?
gk_fmalloc(fptr[nf], "gk_csr_CreateIndex: rval") : NULL); break; default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return; } for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rptr[find[j]]++; } MAKECSR(i, nr, rptr); if (rptr[nr] > 6*nr) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rind[rptr[find[j]]++] = i; } SHIFTCSR(i, nr, rptr); if (fval) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rval[rptr[find[j]]++] = fval[j]; } SHIFTCSR(i, nr, rptr); } } else { if (fval) { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) { k = find[j]; rind[rptr[k]] = i; rval[rptr[k]++] = fval[j]; } } } else { for (i=0; i<nf; i++) { for (j=fptr[i]; j<fptr[i+1]; j++) rind[rptr[find[j]]++] = i; } } SHIFTCSR(i, nr, rptr); } } /*************************************************************************/ /*! Normalizes the rows/columns of the matrix to be unit length. \param mat the matrix itself, \param what indicates what will be normalized and is obtained by specifying GK_CSR_ROW, GK_CSR_COL, GK_CSR_ROW|GK_CSR_COL. \param norm indicates what norm is to normalize to, 1: 1-norm, 2: 2-norm */ /**************************************************************************/ void gk_csr_Normalize(gk_csr_t *mat, int what, int norm) { ssize_t i, j; int n; ssize_t *ptr; float *val, sum; if (what&GK_CSR_ROW && mat->rowval) { n = mat->nrows; ptr = mat->rowptr; val = mat->rowval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++){ if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; /* assume val[j] > 0 */ } if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } if (what&GK_CSR_COL && mat->colval) { n = mat->ncols; ptr = mat->colptr; val = mat->colval; #pragma omp parallel if (ptr[n] > OMPMINOPS) { #pragma omp for private(j,sum) schedule(static) for (i=0; i<n; i++) { for (sum=0.0, j=ptr[i]; j<ptr[i+1]; j++) if (norm == 2) sum += val[j]*val[j]; else if (norm == 1) sum += val[j]; if (sum > 0) { if (norm == 2) sum=1.0/sqrt(sum); else if (norm == 1) sum=1.0/sum; for (j=ptr[i]; j<ptr[i+1]; j++) val[j] *= sum; } } } } } /*************************************************************************/ /*! Applies different row scaling methods. \param mat the matrix itself, \param type indicates the type of row scaling. Possible values are: GK_CSR_MAXTF, GK_CSR_SQRT, GK_CSR_LOG, GK_CSR_IDF, GK_CSR_MAXTF2. */ /**************************************************************************/ void gk_csr_Scale(gk_csr_t *mat, int type) { ssize_t i, j; int nrows, ncols, nnzcols, bgfreq; ssize_t *rowptr; int *rowind, *collen; float *rowval, *cscale, maxtf; nrows = mat->nrows; rowptr = mat->rowptr; rowind = mat->rowind; rowval = mat->rowval; switch (type) { case GK_CSR_MAXTF: /* TF' = .5 + .5*TF/MAX(TF) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j, maxtf) schedule(static) for (i=0; i<nrows; i++) { maxtf = fabs(rowval[rowptr[i]]); for (j=rowptr[i]; j<rowptr[i+1]; j++) maxtf = (maxtf < fabs(rowval[j]) ? 
fabs(rowval[j]) : maxtf); for (j=rowptr[i]; j<rowptr[i+1]; j++) rowval[j] = .5 + .5*rowval[j]/maxtf; } } break; case GK_CSR_MAXTF2: /* TF' = .1 + .9*TF/MAX(TF) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j, maxtf) schedule(static) for (i=0; i<nrows; i++) { maxtf = fabs(rowval[rowptr[i]]); for (j=rowptr[i]; j<rowptr[i+1]; j++) maxtf = (maxtf < fabs(rowval[j]) ? fabs(rowval[j]) : maxtf); for (j=rowptr[i]; j<rowptr[i+1]; j++) rowval[j] = .1 + .9*rowval[j]/maxtf; } } break; case GK_CSR_SQRT: /* TF' = .1+SQRT(TF) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = .1+sign(rowval[j], sqrt(fabs(rowval[j]))); } } } break; case GK_CSR_POW25: /* TF' = .1+POW(TF,.25) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = .1+sign(rowval[j], sqrt(sqrt(fabs(rowval[j])))); } } } break; case GK_CSR_POW65: /* TF' = .1+POW(TF,.65) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .65)); } } } break; case GK_CSR_POW75: /* TF' = .1+POW(TF,.75) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .75)); } } } break; case GK_CSR_POW85: /* TF' = .1+POW(TF,.85) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = .1+sign(rowval[j], powf(fabs(rowval[j]), .85)); } } } break; case GK_CSR_LOG: /* TF' = 1+log_2(TF) */ #pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { float logscale = 1.0/log(2.0); #pragma omp for schedule(static,32) for (i=0; i<rowptr[nrows]; i++) { if (rowval[i] != 0.0) rowval[i] = 1+(rowval[i]>0.0 ? log(rowval[i]) : -log(-rowval[i]))*logscale; } #ifdef XXX #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) { if (rowval[j] != 0.0) rowval[j] = 1+(rowval[j]>0.0 ? log(rowval[j]) : -log(-rowval[j]))*logscale; //rowval[j] = 1+sign(rowval[j], log(fabs(rowval[j]))*logscale); } } #endif } break; case GK_CSR_IDF: /* TF' = TF*IDF */ ncols = mat->ncols; cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale"); collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen"); for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) collen[rowind[j]]++; } #pragma omp parallel if (ncols > OMPMINOPS) { #pragma omp for schedule(static) for (i=0; i<ncols; i++) cscale[i] = (collen[i] > 0 ? 
log(1.0*nrows/collen[i]) : 0.0); }
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) rowval[j] *= cscale[rowind[j]]; } } gk_free((void **)&cscale, &collen, LTERM); break;
case GK_CSR_IDF2: /* TF' = TF*IDF */ ncols = mat->ncols; cscale = gk_fmalloc(ncols, "gk_csr_Scale: cscale"); collen = gk_ismalloc(ncols, 0, "gk_csr_Scale: collen");
for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) collen[rowind[j]]++; }
nnzcols = 0; #pragma omp parallel if (ncols > OMPMINOPS) { #pragma omp for schedule(static) reduction(+:nnzcols) for (i=0; i<ncols; i++) nnzcols += (collen[i] > 0 ? 1 : 0);
bgfreq = gk_max(10, (ssize_t)(.5*rowptr[nrows]/nnzcols)); printf("nnz: %zd, nnzcols: %d, bgfreq: %d\n", rowptr[nrows], nnzcols, bgfreq);
#pragma omp for schedule(static) for (i=0; i<ncols; i++) cscale[i] = (collen[i] > 0 ? log(1.0*(nrows+2*bgfreq)/(bgfreq+collen[i])) : 0.0); }
#pragma omp parallel if (rowptr[nrows] > OMPMINOPS) { #pragma omp for private(j) schedule(static) for (i=0; i<nrows; i++) { for (j=rowptr[i]; j<rowptr[i+1]; j++) rowval[j] *= cscale[rowind[j]]; } } gk_free((void **)&cscale, &collen, LTERM); break;
default: gk_errexit(SIGERR, "Unknown scaling type of %d\n", type); } }
/*************************************************************************/
/*! Computes the sums of the rows/columns \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which sums to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSums(gk_csr_t *mat, int what) { ssize_t i; int n; ssize_t *ptr; float *val, *sums;
switch (what) { case GK_CSR_ROW: n = mat->nrows; ptr = mat->rowptr; val = mat->rowval;
if (mat->rsums) gk_free((void **)&mat->rsums, LTERM);
sums = mat->rsums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums"); break;
case GK_CSR_COL: n = mat->ncols; ptr = mat->colptr; val = mat->colval;
if (mat->csums) gk_free((void **)&mat->csums, LTERM);
sums = mat->csums = gk_fsmalloc(n, 0, "gk_csr_ComputeSums: sums"); break;
default: gk_errexit(SIGERR, "Invalid sum type of %d.\n", what); return; }
#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static) for (i=0; i<n; i++) sums[i] = gk_fsum(ptr[i+1]-ptr[i], val+ptr[i], 1); }
/*************************************************************************/
/*! Computes the squares of the norms of the rows/columns \param mat the matrix itself, \param what is either GK_CSR_ROW or GK_CSR_COL indicating which squared norms to compute.
*/
/**************************************************************************/
void gk_csr_ComputeSquaredNorms(gk_csr_t *mat, int what) { ssize_t i; int n; ssize_t *ptr; float *val, *norms;
switch (what) { case GK_CSR_ROW: n = mat->nrows; ptr = mat->rowptr; val = mat->rowval;
if (mat->rnorms) gk_free((void **)&mat->rnorms, LTERM);
norms = mat->rnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms"); break;
case GK_CSR_COL: n = mat->ncols; ptr = mat->colptr; val = mat->colval;
if (mat->cnorms) gk_free((void **)&mat->cnorms, LTERM);
norms = mat->cnorms = gk_fsmalloc(n, 0, "gk_csr_ComputeSquaredNorms: norms"); break;
default: gk_errexit(SIGERR, "Invalid norm type of %d.\n", what); return; }
#pragma omp parallel for if (ptr[n] > OMPMINOPS) schedule(static) for (i=0; i<n; i++) norms[i] = gk_fdot(ptr[i+1]-ptr[i], val+ptr[i], 1, val+ptr[i], 1); }
/*************************************************************************/
/*!
Computes the similarity between two rows/columns
\param mat the matrix itself. The routine assumes that the indices are sorted in increasing order. \param i1 is the first row/column, \param i2 is the second row/column, \param what is either GK_CSR_ROW or GK_CSR_COL indicating the type of objects between which the similarity will be computed, \param simtype is the type of similarity and is one of GK_CSR_COS, GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN \returns the similarity between the two rows/columns.
*/
/**************************************************************************/
float gk_csr_ComputeSimilarity(gk_csr_t *mat, int i1, int i2, int what, int simtype) { int nind1, nind2; int *ind1, *ind2; float *val1, *val2, stat1, stat2, sim;
switch (what) { case GK_CSR_ROW: if (!mat->rowptr) gk_errexit(SIGERR, "Row-based view of the matrix does not exist.\n"); nind1 = mat->rowptr[i1+1]-mat->rowptr[i1]; nind2 = mat->rowptr[i2+1]-mat->rowptr[i2]; ind1 = mat->rowind + mat->rowptr[i1]; ind2 = mat->rowind + mat->rowptr[i2]; val1 = mat->rowval + mat->rowptr[i1]; val2 = mat->rowval + mat->rowptr[i2]; break;
case GK_CSR_COL: if (!mat->colptr) gk_errexit(SIGERR, "Column-based view of the matrix does not exist.\n"); nind1 = mat->colptr[i1+1]-mat->colptr[i1]; nind2 = mat->colptr[i2+1]-mat->colptr[i2]; ind1 = mat->colind + mat->colptr[i1]; ind2 = mat->colind + mat->colptr[i2]; val1 = mat->colval + mat->colptr[i1]; val2 = mat->colval + mat->colptr[i2]; break;
default: gk_errexit(SIGERR, "Invalid index type of %d.\n", what); return 0.0; }
switch (simtype) { case GK_CSR_COS: case GK_CSR_JAC: sim = stat1 = stat2 = 0.0; i1 = i2 = 0; /* walk both lists to the end so that stat1/stat2 aggregate all entries */ while (i1<nind1 || i2<nind2) { if (i1 == nind1) { stat2 += val2[i2]*val2[i2]; i2++; } else if (i2 == nind2) { stat1 += val1[i1]*val1[i1]; i1++; } else if (ind1[i1] < ind2[i2]) { stat1 += val1[i1]*val1[i1]; i1++; } else if (ind1[i1] > ind2[i2]) { stat2 += val2[i2]*val2[i2]; i2++; } else { sim += val1[i1]*val2[i2]; stat1 += val1[i1]*val1[i1]; stat2 += val2[i2]*val2[i2]; i1++; i2++; } } if (simtype == GK_CSR_COS) sim = (stat1*stat2 > 0.0 ? sim/sqrt(stat1*stat2) : 0.0); else sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0); break;
case GK_CSR_MIN: sim = stat1 = stat2 = 0.0; i1 = i2 = 0; while (i1<nind1 || i2<nind2) { if (i1 == nind1) { stat2 += val2[i2]; i2++; } else if (i2 == nind2) { stat1 += val1[i1]; i1++; } else if (ind1[i1] < ind2[i2]) { stat1 += val1[i1]; i1++; } else if (ind1[i1] > ind2[i2]) { stat2 += val2[i2]; i2++; } else { sim += gk_min(val1[i1],val2[i2]); stat1 += val1[i1]; stat2 += val2[i2]; i1++; i2++; } } sim = (stat1+stat2-sim > 0.0 ? sim/(stat1+stat2-sim) : 0.0); break;
case GK_CSR_AMIN: sim = stat1 = stat2 = 0.0; i1 = i2 = 0; while (i1<nind1 || i2<nind2) { if (i1 == nind1) { stat2 += val2[i2]; i2++; } else if (i2 == nind2) { stat1 += val1[i1]; i1++; } else if (ind1[i1] < ind2[i2]) { stat1 += val1[i1]; i1++; } else if (ind1[i1] > ind2[i2]) { stat2 += val2[i2]; i2++; } else { sim += gk_min(val1[i1],val2[i2]); stat1 += val1[i1]; stat2 += val2[i2]; i1++; i2++; } } sim = (stat1 > 0.0 ? sim/stat1 : 0.0); break;
default: gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype); return -1; }
return sim; }
/*************************************************************************/
/*! Finds the n most similar rows (neighbors) to the query using the selected similarity measure.
\param mat the matrix itself \param nqterms is the number of columns in the query \param qind is the list of query columns \param qval is the list of corresponding query weights \param simtype is the type of similarity and is one of GK_CSR_COS, GK_CSR_JAC, GK_CSR_MIN, GK_CSR_AMIN \param nsim is the maximum number of requested most similar rows. If -1 is provided, then everything is returned unsorted. \param minsim is the minimum similarity of the requested most similar rows \param hits is the result set. This array should be at least of length nsim. \param i_marker is an array of size equal to the number of rows whose values are initialized to -1. If NULL is provided then this array is allocated and freed internally. \param i_cand is an array of size equal to the number of rows. If NULL is provided then this array is allocated and freed internally. \returns the number of identified most similar rows, which can be smaller than the requested number nsim when there are not sufficiently many neighbors.
*/
/**************************************************************************/
int gk_csr_GetSimilarRows(gk_csr_t *mat, int nqterms, int *qind, float *qval, int simtype, int nsim, float minsim, gk_fkv_t *hits, int *i_marker, gk_fkv_t *i_cand) { ssize_t i, ii, j, k; int nrows, ncols, ncand; ssize_t *colptr; int *colind, *marker; float *colval, *rnorms, mynorm, *rsums, mysum; gk_fkv_t *cand;
if (nqterms == 0) return 0;
nrows = mat->nrows; ncols = mat->ncols; colptr = mat->colptr; colind = mat->colind; colval = mat->colval;
marker = (i_marker ? i_marker : gk_ismalloc(nrows, -1, "gk_csr_SimilarRows: marker")); cand = (i_cand ? i_cand : gk_fkvmalloc(nrows, "gk_csr_SimilarRows: cand"));
switch (simtype) { case GK_CSR_COS: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if (marker[k] == -1) { cand[ncand].val = k; cand[ncand].key = 0; marker[k] = ncand++; } cand[marker[k]].key += colval[j]*qval[ii]; } } } break;
case GK_CSR_JAC: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if (marker[k] == -1) { cand[ncand].val = k; cand[ncand].key = 0; marker[k] = ncand++; } cand[marker[k]].key += colval[j]*qval[ii]; } } }
rnorms = mat->rnorms; /* assumes gk_csr_ComputeSquaredNorms(mat, GK_CSR_ROW) has been called */ mynorm = gk_fdot(nqterms, qval, 1, qval, 1);
for (i=0; i<ncand; i++) cand[i].key = cand[i].key/(rnorms[cand[i].val]+mynorm-cand[i].key);
break;
case GK_CSR_MIN: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if (marker[k] == -1) { cand[ncand].val = k; cand[ncand].key = 0; marker[k] = ncand++; } cand[marker[k]].key += gk_min(colval[j], qval[ii]); } } }
rsums = mat->rsums; /* assumes gk_csr_ComputeSums(mat, GK_CSR_ROW) has been called */ mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++) cand[i].key = cand[i].key/(rsums[cand[i].val]+mysum-cand[i].key);
break;
/* Asymmetric MIN similarity */ case GK_CSR_AMIN: for (ncand=0, ii=0; ii<nqterms; ii++) { i = qind[ii]; if (i < ncols) { for (j=colptr[i]; j<colptr[i+1]; j++) { k = colind[j]; if (marker[k] == -1) { cand[ncand].val = k; cand[ncand].key = 0; marker[k] = ncand++; } cand[marker[k]].key += gk_min(colval[j], qval[ii]); } } }
mysum = gk_fsum(nqterms, qval, 1);
for (i=0; i<ncand; i++) cand[i].key = cand[i].key/mysum;
break;
default: gk_errexit(SIGERR, "Unknown similarity measure %d\n", simtype); return -1; }
/* go and prune the hits that are below minsim */ for (j=0, i=0; i<ncand; i++) { marker[cand[i].val] = -1; if (cand[i].key >=
minsim) cand[j++] = cand[i]; } ncand = j; if (nsim == -1 || nsim >= ncand) { nsim = ncand; } else { nsim = gk_min(nsim, ncand); gk_dfkvkselect(ncand, nsim, cand); gk_fkvsortd(nsim, cand); } gk_fkvcopy(nsim, cand, hits); if (i_marker == NULL) gk_free((void **)&marker, LTERM); if (i_cand == NULL) gk_free((void **)&cand, LTERM); return nsim; }
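/* Editor's note: a self-contained, hedged sketch (not part of GKlib; the name
   cosine_sparse and its signature are invented for illustration) of the
   sorted-merge pattern that gk_csr_ComputeSimilarity() uses for GK_CSR_COS:
   both index lists are walked to the end so the norms cover every entry.
   Kept inside this comment so it does not affect compilation.

#include <math.h>

// cosine similarity of two sparse vectors whose index arrays are sorted in
// increasing order (see gk_csr_SortIndices)
static double cosine_sparse(int n1, const int *ind1, const float *val1,
                            int n2, const int *ind2, const float *val2)
{
  int i1 = 0, i2 = 0;
  double dot = 0.0, norm1 = 0.0, norm2 = 0.0;

  while (i1 < n1 || i2 < n2) {
    if (i2 == n2 || (i1 < n1 && ind1[i1] < ind2[i2])) {   // only in vector 1
      norm1 += (double)val1[i1]*val1[i1]; i1++;
    }
    else if (i1 == n1 || ind1[i1] > ind2[i2]) {           // only in vector 2
      norm2 += (double)val2[i2]*val2[i2]; i2++;
    }
    else {                                                // common index
      dot   += (double)val1[i1]*val2[i2];
      norm1 += (double)val1[i1]*val1[i1];
      norm2 += (double)val2[i2]*val2[i2];
      i1++; i2++;
    }
  }
  return (norm1*norm2 > 0.0 ? dot/sqrt(norm1*norm2) : 0.0);
}
*/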
GB_unaryop__ainv_bool_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_bool_int16 // op(A') function: GB_tran__ainv_bool_int16 // C type: bool // A type: int16_t // cast: bool cij = (bool) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, aij) \ bool z = (bool) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_bool_int16 ( bool *Cx, // Cx and Ax may be aliased int16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_bool_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
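/* Editor's note: a hedged, standalone sketch (not part of SuiteSparse; the
   name unop_ainv_bool_int16 is invented) of what GB_unop__ainv_bool_int16
   above reduces to once GB_GETA/GB_CASTING/GB_OP are expanded: a statically
   scheduled OpenMP apply loop that typecasts and stores each entry. Kept in
   this comment so it does not affect compilation.

#include <stdbool.h>
#include <stdint.h>

static void unop_ainv_bool_int16 (bool *Cx, const int16_t *Ax,
                                  int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;   // GB_GETA: read the input entry
        bool z = (bool) aij ;    // GB_CASTING: typecast int16_t -> bool
        Cx [p] = z ;             // GB_OP: for this kernel, cij = aij
    }
}
*/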
IO.h
// This code is part of the project "Ligra: A Lightweight Graph Processing // Framework for Shared Memory", presented at Principles and Practice of // Parallel Programming, 2013. // Copyright (c) 2013 Julian Shun and Guy Blelloch // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #include <iostream> #include <fstream> #include <stdlib.h> #include <cmath> #include <sys/mman.h> #include <stdio.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <unistd.h> #include <parallel/algorithm> #include <omp.h> #include <cassert> #include "parallel.h" #include "blockRadixSort.h" #include "quickSort.h" #include "utils.h" #include "graph.h" #include "pvector.h" #include "timer.h" #include "sliding_queue.h" #include "dbg.h" using namespace std; typedef pair<uintE,uintE> intPair; typedef pair<uintE, pair<uintE,intE> > intTriple; template <class E> struct pairFirstCmp { bool operator() (pair<uintE,E> a, pair<uintE,E> b) { return a.first < b.first; } }; template <class E> struct getFirst {uintE operator() (pair<uintE,E> a) {return a.first;} }; template <class IntType> struct pairBothCmp { bool operator() (pair<uintE,IntType> a, pair<uintE,IntType> b) { if (a.first != b.first) return a.first < b.first; return a.second < b.second; } }; // A structure that keeps a sequence of strings all allocated from // the same block of memory struct words { long n; // total number of characters char* Chars; // array storing all strings long m; // number of substrings char** Strings; // pointers to strings (all should be null terminated) words() {} words(char* C, long nn, char** S, long mm) : Chars(C), n(nn), Strings(S), m(mm) {} void del() {free(Chars); free(Strings);} }; inline bool isSpace(char c) { switch (c) { case '\r': case '\t': case '\n': case 0: case ' ' : return true; default : return false; } } _seq<char> mmapStringFromFile(const char *filename) { struct stat sb; int fd = open(filename, O_RDONLY); if (fd == -1) { perror("open"); exit(-1); } if (fstat(fd, &sb) == -1) { perror("fstat"); exit(-1); } if (!S_ISREG (sb.st_mode)) { perror("not a file\n"); exit(-1); } char *p = static_cast<char*>(mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0)); if (p == MAP_FAILED) { perror("mmap"); exit(-1); } if (close(fd) == -1) { perror("close"); exit(-1); } size_t n = sb.st_size; // char *bytes = newA(char, n); // parallel_for(size_t i=0; i<n; i++) { // bytes[i] = p[i]; // } // if (munmap(p, sb.st_size) == -1) { // perror("munmap"); // exit(-1); // } // cout << "mmapped" << endl; // free(bytes); 
// exit(0); return _seq<char>(p, n); } _seq<char> readStringFromFile(char *fileName) { ifstream file (fileName, ios::in | ios::binary | ios::ate); if (!file.is_open()) { std::cout << "Unable to open file: " << fileName << std::endl; abort(); } long end = file.tellg(); file.seekg (0, ios::beg); long n = end - file.tellg(); char* bytes = newA(char,n+1); assert(bytes != NULL && "Malloc failure\n"); file.read (bytes,n); file.close(); return _seq<char>(bytes,n); } // parallel code for converting a string to words words stringToWords(char *Str, long n) { {parallel_for (long i=0; i < n; i++) if (isSpace(Str[i])) Str[i] = 0; } // mark start of words bool *FL = newA(bool,n); assert(FL != NULL && "Malloc failure\n"); FL[0] = Str[0]; {parallel_for (long i=1; i < n; i++) FL[i] = Str[i] && !Str[i-1];} // offset for each start of word _seq<long> Off = sequence::packIndex<long>(FL, n); free(FL); long m = Off.n; long *offsets = Off.A; // pointer to each start of word char **SA = newA(char*, m); assert(SA != NULL && "Malloc failure\n"); {parallel_for (long j=0; j < m; j++) SA[j] = Str+offsets[j];} free(offsets); return words(Str,n,SA,m); } template <class vertex> graph<vertex> readGraphFromFile(char* fname, bool isSymmetric, bool mmap) { #ifndef VGR words W; if (mmap) { _seq<char> S = mmapStringFromFile(fname); char *bytes = newA(char, S.n); assert(bytes != NULL && "Malloc failure\n"); // Cannot mutate the graph unless we copy. parallel_for(size_t i=0; i<S.n; i++) { bytes[i] = S.A[i]; } if (munmap(S.A, S.n) == -1) { perror("munmap"); exit(-1); } S.A = bytes; W = stringToWords(S.A, S.n); } else { _seq<char> S = readStringFromFile(fname); W = stringToWords(S.A, S.n); } #ifndef WEIGHTED if (W.Strings[0] != (string) "AdjacencyGraph") { #else if (W.Strings[0] != (string) "WeightedAdjacencyGraph") { #endif cout << "Bad input file" << endl; abort(); } long len = W.m -1; long n = atol(W.Strings[1]); long m = atol(W.Strings[2]); #ifndef WEIGHTED if (len != n + m + 2) { #else if (len != n + 2*m + 2) { #endif cout << "Bad input file" << endl; abort(); } uintT* offsets = newA(uintT,n); assert(offsets != NULL && "Malloc failure\n"); #ifndef WEIGHTED uintE* edges = newA(uintE,m); #else intE* edges = newA(intE,2*m); #endif assert(edges != NULL && "Malloc failure\n"); {parallel_for(long i=0; i < n; i++) offsets[i] = atol(W.Strings[i + 3]);} {parallel_for(long i=0; i<m; i++) { #ifndef WEIGHTED edges[i] = atol(W.Strings[i+n+3]); #else edges[2*i] = atol(W.Strings[i+n+3]); edges[2*i+1] = atol(W.Strings[i+n+m+3]); #endif }} //W.del(); // to deal with performance bug in malloc W.del(); //The original code ^ commented this out #else // #ifdef VGR // Added by Priyank if ( sizeof(uintT) != sizeof(unsigned long long) ) { std::cout << sizeof(uintT) << " " << sizeof(unsigned long long) << std::endl; abort(); } if ( sizeof(uintE) != sizeof(unsigned int) ) { std::cout << sizeof(uintE) << " " << sizeof(unsigned int) << std::endl; abort(); } ifstream ifs(fname, std::ios::binary); if ( !ifs.good() ) { std::cout << "Unable to open file: " << fname << std::endl; abort(); } unsigned long long major_version; unsigned long long minor_version; unsigned long long n1; unsigned long long m1; ifs.read((char*)&major_version, sizeof(unsigned long long)); ifs.read((char*)&minor_version, sizeof(unsigned long long)); ifs.read((char*)&n1, sizeof(unsigned long long)); ifs.read((char*)&m1, sizeof(unsigned long long)); long n,m; m = m1; n = n1; #ifndef WEIGHTED if ( ( major_version != 1 ) || ( minor_version != 0 ) ) { std::cout << "major: " << major_version 
<< " minor: " << minor_version << " n: " << n << " m: " << m << std::endl; abort(); } #else if ( ( major_version != 1 ) || ( minor_version != 4 ) ) { std::cout << "major: " << major_version << " minor: " << minor_version << " n: " << n << " m: " << m << std::endl; abort(); } #endif uintT* offsets = newA(uintT,n); assert(offsets != NULL && "Malloc failure\n"); ifs.read((char*)&offsets[1], sizeof(unsigned long long) * (n-1)); if ( !ifs || (ifs.gcount() != (sizeof(unsigned long long) * (n-1))) ) { std::cout << "Error in reading offsets." << std::endl; abort(); } unsigned long long temp; ifs.read((char*)&temp, sizeof(unsigned long long) * (1)); offsets[0] = 0; #ifndef WEIGHTED uintE* edges = newA(uintE,m); #else intE* edges_ = newA(intE,2*m); assert(edges_ != NULL && "Malloc failure\n"); intE* edges = newA(intE,2*m); #endif assert(edges != NULL && "Malloc failure\n"); #ifndef WEIGHTED ifs.read((char*)edges, sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." << std::endl; abort(); } #else ifs.read((char*)&edges_[0], sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." << std::endl; abort(); } if ( (m % 2) == 1 ) { unsigned int x = 0; ifs.read((char*)&x, sizeof(unsigned int)); //std::cout << "extra element: " << x << std::endl; assert(x == 0); } ifs.read((char*)&edges_[m], sizeof(unsigned int) * m); if ( !ifs || (ifs.gcount() != (sizeof(unsigned int) * m)) ) { std::cout << "Error in reading edges." << std::endl; abort(); } //std::cout << edges_[m] << std::endl; {parallel_for(long i=0; i<m; i++) { edges[2*i] = edges_[i]; edges[2*i+1] = edges_[i+m]; }} free(edges_); #endif ifs.close(); #endif // VGR vertex* v = newA(vertex,n); assert(v != NULL && "Malloc failure\n"); {parallel_for (uintT i=0; i < n; i++) { uintT o = offsets[i]; uintT l = ((i == n-1) ? 
m : offsets[i+1])-offsets[i]; v[i].setOutDegree(l); #ifndef WEIGHTED v[i].setOutNeighbors(edges+o); #else v[i].setOutNeighbors(edges+2*o); #endif }} if(!isSymmetric) { uintT* tOffsets = newA(uintT,n); assert(tOffsets != NULL && "Malloc failure\n"); {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;} #ifndef WEIGHTED intPair* temp = newA(intPair,m); #else intTriple* temp = newA(intTriple,m); #endif assert(temp != NULL && "Malloc failure\n"); {parallel_for(long i=0;i<n;i++){ uintT o = offsets[i]; for(uintT j=0;j<v[i].getOutDegree();j++){ #ifndef WEIGHTED temp[o+j] = make_pair(v[i].getOutNeighbor(j),i); #else temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j))); #endif } }} free(offsets); #ifndef WEIGHTED #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<uintE>()); #else quickSort(temp,m,pairFirstCmp<uintE>()); #endif #else #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<intPair>()); #else quickSort(temp,m,pairFirstCmp<intPair>()); #endif #endif tOffsets[temp[0].first] = 0; #ifndef WEIGHTED uintE* inEdges = newA(uintE,m); inEdges[0] = temp[0].second; #else intE* inEdges = newA(intE,2*m); inEdges[0] = temp[0].second.first; inEdges[1] = temp[0].second.second; #endif assert(inEdges != NULL && "Malloc failure\n"); {parallel_for(long i=1;i<m;i++) { #ifndef WEIGHTED inEdges[i] = temp[i].second; #else inEdges[2*i] = temp[i].second.first; inEdges[2*i+1] = temp[i].second.second; #endif if(temp[i].first != temp[i-1].first) { tOffsets[temp[i].first] = i; } }} free(temp); //fill in offsets of degree 0 vertices by taking closest non-zero //offset to the right sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m); {parallel_for(long i=0;i<n;i++){ uintT o = tOffsets[i]; uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i]; v[i].setInDegree(l); #ifndef WEIGHTED v[i].setInNeighbors(inEdges+o); #else v[i].setInNeighbors(inEdges+2*o); #endif }} free(tOffsets); Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges); std::cout << "Read directed graph. Num Nodes = " << n << " and Num Edges = " << m << "\n"; return graph<vertex>(v,n,m,mem); } else { free(offsets); Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges); std::cout << "Read undirected graph. 
Num Nodes = " << n << " and Num Edges = " << m << "\n"; return graph<vertex>(v,n,m,mem); } } template <class vertex> graph<vertex> readGraphFromBinary(char* iFile, bool isSymmetric) { char* config = (char*) ".config"; char* adj = (char*) ".adj"; char* idx = (char*) ".idx"; char configFile[strlen(iFile)+strlen(config)+1]; char adjFile[strlen(iFile)+strlen(adj)+1]; char idxFile[strlen(iFile)+strlen(idx)+1]; *configFile = *adjFile = *idxFile = '\0'; strcat(configFile,iFile); strcat(adjFile,iFile); strcat(idxFile,iFile); strcat(configFile,config); strcat(adjFile,adj); strcat(idxFile,idx); ifstream in(configFile, ifstream::in); long n; in >> n; in.close(); ifstream in2(adjFile,ifstream::in | ios::binary); //stored as uints in2.seekg(0, ios::end); long size = in2.tellg(); in2.seekg(0); #ifdef WEIGHTED long m = size/(2*sizeof(uint)); #else long m = size/sizeof(uint); #endif char* s = (char *) malloc(size); in2.read(s,size); in2.close(); uintE* edges = (uintE*) s; ifstream in3(idxFile,ifstream::in | ios::binary); //stored as longs in3.seekg(0, ios::end); size = in3.tellg(); in3.seekg(0); if(n != size/sizeof(intT)) { cout << "File size wrong\n"; abort(); } char* t = (char *) malloc(size); in3.read(t,size); in3.close(); uintT* offsets = (uintT*) t; vertex* v = newA(vertex,n); #ifdef WEIGHTED intE* edgesAndWeights = newA(intE,2*m); {parallel_for(long i=0;i<m;i++) { edgesAndWeights[2*i] = edges[i]; edgesAndWeights[2*i+1] = edges[i+m]; }} //free(edges); #endif {parallel_for(long i=0;i<n;i++) { uintT o = offsets[i]; uintT l = ((i==n-1) ? m : offsets[i+1])-offsets[i]; v[i].setOutDegree(l); #ifndef WEIGHTED v[i].setOutNeighbors((uintE*)edges+o); #else v[i].setOutNeighbors(edgesAndWeights+2*o); #endif }} if(!isSymmetric) { uintT* tOffsets = newA(uintT,n); {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;} #ifndef WEIGHTED intPair* temp = newA(intPair,m); #else intTriple* temp = newA(intTriple,m); #endif {parallel_for(intT i=0;i<n;i++){ uintT o = offsets[i]; for(uintT j=0;j<v[i].getOutDegree();j++){ #ifndef WEIGHTED temp[o+j] = make_pair(v[i].getOutNeighbor(j),i); #else temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j))); #endif } }} free(offsets); #ifndef WEIGHTED #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<uintE>()); #else quickSort(temp,m,pairFirstCmp<uintE>()); #endif #else #ifndef LOWMEM intSort::iSort(temp,m,n+1,getFirst<intPair>()); #else quickSort(temp,m,pairFirstCmp<intPair>()); #endif #endif tOffsets[temp[0].first] = 0; #ifndef WEIGHTED uintE* inEdges = newA(uintE,m); inEdges[0] = temp[0].second; #else intE* inEdges = newA(intE,2*m); inEdges[0] = temp[0].second.first; inEdges[1] = temp[0].second.second; #endif {parallel_for(long i=1;i<m;i++) { #ifndef WEIGHTED inEdges[i] = temp[i].second; #else inEdges[2*i] = temp[i].second.first; inEdges[2*i+1] = temp[i].second.second; #endif if(temp[i].first != temp[i-1].first) { tOffsets[temp[i].first] = i; } }} free(temp); //fill in offsets of degree 0 vertices by taking closest non-zero //offset to the right sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m); {parallel_for(long i=0;i<n;i++){ uintT o = tOffsets[i]; uintT l = ((i == n-1) ? 
m : tOffsets[i+1])-tOffsets[i]; v[i].setInDegree(l); #ifndef WEIGHTED v[i].setInNeighbors((uintE*)inEdges+o); #else v[i].setInNeighbors((intE*)(inEdges+2*o)); #endif }} free(tOffsets); #ifndef WEIGHTED Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges); return graph<vertex>(v,n,m,mem); #else Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights,inEdges); return graph<vertex>(v,n,m,mem); #endif } free(offsets); #ifndef WEIGHTED Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges); return graph<vertex>(v,n,m,mem); #else Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights); return graph<vertex>(v,n,m,mem); #endif } template <class vertex> graph<vertex> readGraph(char* iFile, bool compressed, bool symmetric, bool binary, bool mmap) { if(binary) return readGraphFromBinary<vertex>(iFile,symmetric); else return readGraphFromFile<vertex>(iFile,symmetric,mmap); } template <class vertex> graph<vertex> readCompressedGraph(char* fname, bool isSymmetric, bool mmap) { char* s; if (mmap) { _seq<char> S = mmapStringFromFile(fname); // Cannot mutate graph unless we copy. char *bytes = newA(char, S.n); parallel_for(size_t i=0; i<S.n; i++) { bytes[i] = S.A[i]; } if (munmap(S.A, S.n) == -1) { perror("munmap"); exit(-1); } s = bytes; } else { ifstream in(fname,ifstream::in |ios::binary); in.seekg(0,ios::end); long size = in.tellg(); in.seekg(0); cout << "size = " << size << endl; s = (char*) malloc(size); in.read(s,size); in.close(); } long* sizes = (long*) s; long n = sizes[0], m = sizes[1], totalSpace = sizes[2]; cout << "n = "<<n<<" m = "<<m<<" totalSpace = "<<totalSpace<<endl; cout << "reading file..."<<endl; uintT* offsets = (uintT*) (s+3*sizeof(long)); long skip = 3*sizeof(long) + (n+1)*sizeof(intT); uintE* Degrees = (uintE*) (s+skip); skip+= n*sizeof(intE); uchar* edges = (uchar*)(s+skip); uintT* inOffsets; uchar* inEdges; uintE* inDegrees; if(!isSymmetric){ skip += totalSpace; uchar* inData = (uchar*)(s + skip); sizes = (long*) inData; long inTotalSpace = sizes[0]; cout << "inTotalSpace = "<<inTotalSpace<<endl; skip += sizeof(long); inOffsets = (uintT*) (s + skip); skip += (n+1)*sizeof(uintT); inDegrees = (uintE*)(s+skip); skip += n*sizeof(uintE); inEdges = (uchar*)(s + skip); } else { inOffsets = offsets; inEdges = edges; inDegrees = Degrees; } vertex *V = newA(vertex,n); parallel_for(long i=0;i<n;i++) { long o = offsets[i]; uintT d = Degrees[i]; V[i].setOutDegree(d); V[i].setOutNeighbors(edges+o); } if(sizeof(vertex) == sizeof(compressedAsymmetricVertex)){ parallel_for(long i=0;i<n;i++) { long o = inOffsets[i]; uintT d = inDegrees[i]; V[i].setInDegree(d); V[i].setInNeighbors(inEdges+o); } } cout << "creating graph..."<<endl; Compressed_Mem<vertex>* mem = new Compressed_Mem<vertex>(V, s); graph<vertex> G(V,n,m,mem); return G; } /* prefix sum used by the preprocess function defined below */ static pvector<uintT> ParallelPrefixSum (const pvector<uintT> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<uintT> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { uintT lsum = 0; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<uintT> bulk_prefix(num_blocks+1); uintT total = 0; for (size_t block=0; block < num_blocks; block++) { 
bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<uintT> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { uintT local_total = bulk_prefix[block]; size_t block_end = std::min((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } template <class vertex> void generateHubClusterMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) { Timer t; t.Start(); auto numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; pvector<degree_nodeid_t> degree_id_pairs(numVertices); uintT avgDegree = numEdges / numVertices; uintT hubCount {0}; const int PADDING = 64 / sizeof(uintE); uintE* localOffsets = new uintE[omp_get_max_threads() * PADDING](); uintE partitionSz = numVertices / omp_get_max_threads(); #pragma omp parallel { int tid = omp_get_thread_num(); uintE startID = partitionSz * tid; uintE stopID = partitionSz * (tid + 1); if (tid == omp_get_max_threads() - 1) { stopID = numVertices; } for (uintE n = startID; n < stopID; ++n) { vertex vtx = origG[n]; if (useOutdeg) { if (vtx.getOutDegree() > avgDegree) { ++localOffsets[tid * PADDING]; new_ids[n] = 1; } } else { if (vtx.getInDegree() > avgDegree) { ++localOffsets[tid * PADDING]; new_ids[n] = 1; } } } } uintE sum {0}; for (int tid = 0; tid < omp_get_max_threads(); ++tid) { auto origCount = localOffsets[tid * PADDING]; localOffsets[tid * PADDING] = sum; sum += origCount; } /* Step II - assign a remap for the hub vertices first */ #pragma omp parallel { uintE localCtr {0}; int tid = omp_get_thread_num(); uintE startID = partitionSz * tid; uintE stopID = partitionSz * (tid + 1); if (tid == omp_get_max_threads() - 1) { stopID = numVertices; } for (uintE n = startID; n < stopID; ++n) { if (new_ids[n] != UINT_E_MAX) { new_ids[n] = localOffsets[tid * PADDING] + localCtr; ++localCtr; } } } delete[] localOffsets; /* Step III - assigning a remap for (easy) non hub vertices */ auto numHubs = sum; SlidingQueue<uintE> queue(numHubs); #pragma omp parallel { //assert(omp_get_max_threads() == 56); QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads()); #pragma omp for for (uintE n = numHubs; n < numVertices; ++n) { if (new_ids[n] == UINT_E_MAX) { // This steps preserves the ordering of the original graph (as much as possible) new_ids[n] = n; } else { uintE remappedTo = new_ids[n]; if (new_ids[remappedTo] == UINT_E_MAX) { // safe to swap Ids because the original vertex is a non-hub new_ids[remappedTo] = n; } else { // Cannot swap ids because original vertex was a hub (swapping // would disturb sorted ordering of hubs - not allowed) lqueue.push_back(n); } } } lqueue.flush(); } queue.slide_window(); //the queue keeps a list of vertices where a simple swap of locations is not possible /* Step IV - assigning remaps for remaining non hubs */ uintE unassignedCtr {0}; auto q_iter = queue.begin(); #pragma omp parallel for for (uintE n = 0; n < numHubs; ++n) { if (new_ids[n] == UINT_E_MAX) { uintE u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1)); new_ids[n] = u; } } t.Stop(); t.PrintTime("HubCluster Map Time", t.Seconds()); } template <class vertex> void generateHubSortMapping(const graph<vertex>& GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank, bool isDenseWrite) { Timer t; t.Start(); auto 
numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; pvector<degree_nodeid_t> degree_id_pairs(numVertices); uintT avgDegree = numEdges / numVertices; uintT hubCount {0}; /* STEP I - collect degrees of all vertices */ #pragma omp parallel for reduction(+ : hubCount) for (uintE v = 0; v < numVertices; ++v) { vertex vtx = origG[v]; if (useOutdeg) { degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v); if (vtx.getOutDegree() > avgDegree) { ++hubCount; } } else { degree_id_pairs[v] = std::make_pair(vtx.getInDegree(), v); if (vtx.getInDegree() > avgDegree) { ++hubCount; } } } /* Step II - sort the degrees in parallel */ __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_nodeid_t>()); /* Step III - make a remap based on the sorted degree list [Only for hubs] */ #pragma omp parallel for for (uintE n = 0; n < hubCount; ++n) { new_ids[degree_id_pairs[n].second] = n; } //clearing space from degree pairs pvector<degree_nodeid_t>().swap(degree_id_pairs); /* Step IV - assigning a remap for (easy) non hub vertices */ auto numHubs = hubCount; SlidingQueue<uintE> queue(numHubs); #pragma omp parallel { QueueBuffer<uintE> lqueue(queue, numHubs / omp_get_max_threads()); #pragma omp for for (uintE n = numHubs; n < numVertices; ++n) { if (new_ids[n] == UINT_E_MAX) { // This steps preserves the ordering of the original graph (as much as possible) new_ids[n] = n; } else { uintE remappedTo = new_ids[n]; if (new_ids[remappedTo] == UINT_E_MAX) { // safe to swap Ids because the original vertex is a non-hub new_ids[remappedTo] = n; } else { // Cannot swap ids because original vertex was a hub (swapping // would disturb sorted ordering of hubs - not allowed) lqueue.push_back(n); } } } lqueue.flush(); } queue.slide_window(); //the queue keeps a list of vertices where a simple swap of locations is not possible /* Step V - assigning remaps for remaining non hubs */ uintE unassignedCtr {0}; auto q_iter = queue.begin(); #pragma omp parallel for for (uintE n = 0; n < numHubs; ++n) { if (new_ids[n] == UINT_E_MAX) { uintE u = *(q_iter + __sync_fetch_and_add(&unassignedCtr, 1)); new_ids[n] = u; } } t.Stop(); t.PrintTime("HubSort Map Time", t.Seconds()); } /* Preprocess a graph based on outdegrees or indegrees PageRank-specific Optimizations for directed graphs - 1) We do not create a new outNeighbors list (because it pull-only) 2) We only create new out-degrees because PR uses it during computation */ template <class vertex> graph<vertex> preprocessGraph(graph<vertex> GA, bool isSym, bool useOutdeg, pvector<uintE>& new_ids, bool isPageRank = false, bool isDenseWrite = false, ReorderingAlgo reordering_algo = DBG) { Timer t; t.Start(); auto numVertices = GA.n; auto numEdges = GA.m; vertex *origG = GA.V; if (!isSym) { generateMapping(GA, reordering_algo, isSym, useOutdeg, new_ids, isPageRank, isDenseWrite); /* Step VI - generate degree list for new graph */ pvector<uintT> degrees(numVertices); pvector<uintT> inv_degrees(numVertices); #pragma omp parallel for for (uintE v = 0; v < numVertices; ++v) { auto newID = new_ids[v]; if (useOutdeg) { vertex vtx = origG[v]; degrees[newID] = vtx.getOutDegree(); inv_degrees[newID] = vtx.getInDegree(); } else { vertex vtx = origG[v]; degrees[newID] = vtx.getInDegree(); inv_degrees[newID] = vtx.getOutDegree(); } } /* Step VII - make a new vertex list for the new graph */ pvector<uintT> offsets = ParallelPrefixSum(degrees); pvector<uintT> inv_offsets = ParallelPrefixSum(inv_degrees); //clearing space from degree lists 
pvector<uintT>().swap(degrees); pvector<uintT>().swap(inv_degrees); #ifndef WEIGHTED uintE* outEdges = newA(uintE, numEdges); uintE* inEdges = newA(uintE, numEdges); #else intE* outEdges = newA(intE, 2 * numEdges); intE* inEdges = newA(intE, 2 * numEdges); #endif vertex* newV = newA(vertex, numVertices); #pragma omp parallel for schedule (dynamic, 1024) for (uintE v = 0; v < numVertices; ++v) { /* note that vertex IDs u and v belong to the space of original vertex IDs */ if (!isPageRank) { //copy out-neighbors auto newID = new_ids[v]; newV[newID].setOutDegree(origG[v].getOutDegree()); #ifndef WEIGHTED if (useOutdeg) newV[newID].setOutNeighbors(outEdges + offsets[newID]); else newV[newID].setOutNeighbors(outEdges + inv_offsets[newID]); #else if (useOutdeg) newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]); else newV[newID].setOutNeighbors(outEdges + 2 * inv_offsets[newID]); #endif for (uintE u = 0; u < origG[v].getOutDegree(); ++u) { auto origNgh = origG[v].getOutNeighbor(u); newV[newID].setOutNeighbor(u, new_ids[origNgh]); #ifdef WEIGHTED newV[newID].setOutWeight(u, origG[v].getOutWeight(u)); #endif } if (!isDenseWrite) { /* for dense-write pushonly apps we dont need in-neighbors */ //copy in-neighbors newV[newID].setInDegree(origG[v].getInDegree()); #ifndef WEIGHTED if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + offsets[newID]); #else if (useOutdeg) newV[newID].setInNeighbors(inEdges + 2 * inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + 2 * offsets[newID]); #endif for (uintE u = 0; u < origG[v].getInDegree(); ++u) { auto origNgh = origG[v].getInNeighbor(u); newV[newID].setInNeighbor(u, new_ids[origNgh]); #ifdef WEIGHTED newV[newID].setInWeight(u, origG[v].getInWeight(u)); #endif } } } else { /* PageRank - no need to apply weighted conditionals */ //copy in-neighbors auto newID = new_ids[v]; newV[newID].setInDegree(origG[v].getInDegree()); if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]); else newV[newID].setInNeighbors(inEdges + offsets[newID]); for (uintE u = 0; u < origG[v].getInDegree(); ++u) { auto origNgh = origG[v].getInNeighbor(u); newV[newID].setInNeighbor(u, new_ids[origNgh]); } //only set out-degrees newV[newID].setOutDegree(origG[v].getOutDegree()); } } /* Step V - make the new graph */ Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges,inEdges); t.Stop(); string s = ReorderingAlgoStr(reordering_algo) + " Total Map Time"; t.PrintTime(s.c_str(), t.Seconds()); return graph<vertex>(newV,numVertices,numEdges,mem); } else { /* undirected graph */ generateMapping(GA, reordering_algo, isSym, true, new_ids, isPageRank, isDenseWrite); /* Step VI - generate degree list for new graph */ pvector<uintT> degrees(numVertices); #pragma omp parallel for for (uintE v = 0; v < numVertices; ++v) { auto newID = new_ids[v]; vertex vtx = origG[v]; degrees[newID] = vtx.getOutDegree(); } /* Step VII - make a new vertex list for the new graph */ pvector<uintT> offsets = ParallelPrefixSum(degrees); //clearing space from degrees pvector<uintT>().swap(degrees); #ifndef WEIGHTED uintE* outEdges = newA(uintE, numEdges); #else intE* outEdges = newA(intE, 2 * numEdges); #endif vertex* newV = newA(vertex, numVertices); #pragma omp parallel for schedule (dynamic, 1024) for (uintE v = 0; v < numVertices; ++v) { /* note that vertex IDs u and v belong to the space of original vertex IDs */ //copy neighbors auto newID = new_ids[v]; 
newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
      newV[newID].setOutNeighbors(outEdges + offsets[newID]);
#else
      newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
#endif
      for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
        auto origNgh = origG[v].getOutNeighbor(u);
        newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
        newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
      }
    }

    /* Step VIII - make the new graph */
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV, numVertices, numEdges, outEdges);
    t.Stop();
    string s = ReorderingAlgoStr(reordering_algo) + " Total Map Time";
    t.PrintTime(s.c_str(), t.Seconds());
    return graph<vertex>(newV, numVertices, numEdges, mem);
  }
}
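/* A minimal, self-contained sketch of the degree-sort remapping idea behind
 * HubSort's Steps I-III, stripped of the pvector/vertex API used above and of
 * the hub-only swapping refinement: here every vertex's rank in descending
 * degree order simply becomes its new id. All names are illustrative, not
 * from this codebase. */
#include <stdio.h>
#include <stdlib.h>

typedef struct { unsigned degree; unsigned id; } degree_id_t;

/* sort descending by degree */
static int cmp_desc(const void *a, const void *b) {
    const degree_id_t *x = (const degree_id_t *)a, *y = (const degree_id_t *)b;
    return (x->degree < y->degree) - (x->degree > y->degree);
}

void degree_sort_map(const unsigned *degree, unsigned n, unsigned *new_ids) {
    degree_id_t *pairs = malloc(n * sizeof *pairs);
    for (unsigned v = 0; v < n; ++v) {
        pairs[v].degree = degree[v];
        pairs[v].id = v;
    }
    qsort(pairs, n, sizeof *pairs, cmp_desc);
    for (unsigned rank = 0; rank < n; ++rank)
        new_ids[pairs[rank].id] = rank;   /* highest-degree vertex -> id 0 */
    free(pairs);
}

int main(void) {
    unsigned degree[] = { 1, 7, 3, 0, 5 };
    unsigned new_ids[5];
    degree_sort_map(degree, 5, new_ids);
    for (unsigned v = 0; v < 5; ++v)
        printf("vertex %u -> %u\n", v, new_ids[v]);  /* 0->3, 1->0, 2->2, 3->4, 4->1 */
    return 0;
}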
GB_unaryop__lnot_fp64_bool.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__lnot_fp64_bool // op(A') function: GB_tran__lnot_fp64_bool // C type: double // A type: bool // cast: double cij = (double) aij // unaryop: cij = !(aij != 0) #define GB_ATYPE \ bool #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = !(x != 0) ; // casting #define GB_CASTING(z, x) \ double z = (double) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__lnot_fp64_bool ( double *restrict Cx, const bool *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__lnot_fp64_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
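/* Once the GB_GETA / GB_CASTING / GB_OP macros above are expanded, the
 * generated kernel boils down to the loop below: cast each bool to double,
 * then apply logical-not. A standalone sketch with the GB.h types and
 * GrB_Info plumbing omitted (function and variable names are illustrative): */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

void unop_lnot_fp64_bool(double *Cx, const bool *Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        bool aij = Ax[p];         /* GB_GETA: aij = Ax [pA]          */
        double x = (double) aij;  /* GB_CASTING: double x = (double) aij */
        Cx[p] = !(x != 0);        /* GB_OP applied to GB_CX (pC)     */
    }
}

int main(void) {
    bool A[3] = { true, false, true };
    double C[3];
    unop_lnot_fp64_bool(C, A, 3, 2);
    printf("%g %g %g\n", C[0], C[1], C[2]);  /* prints: 0 1 0 */
    return 0;
}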
cancel.c
// RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"

int main() {
#pragma omp parallel num_threads(1)
  {
    int x = 0;
    int i;
#pragma omp for
    for (i = 0; i < 2; i++) {
#pragma omp cancel for
    }
#pragma omp sections
    {
#pragma omp section
      {
#pragma omp cancel sections
      }
#pragma omp section
      {
#pragma omp cancel sections
      }
    }
  }

  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'

  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=[[TASK_ID:[0-9]+]], parallel_function={{0x[0-f]*}}, task_type=ompt_task_initial=1, has_dependences=no

  // cancel for
  // ___CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=20, codeptr_ra={{0x[0-f]*}}

  // cancel sections
  // ___CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=18, codeptr_ra={{0x[0-f]*}}

  return 0;
}
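/* The RUN line above only enables cancellation through the OMP_CANCELLATION
 * environment variable; when it is unset, every `#pragma omp cancel` in the
 * test is a no-op. A small sketch, separate from the test itself, that makes
 * this runtime dependency visible via the standard omp_get_cancellation()
 * query and an explicit cancellation point: */
#include <stdio.h>
#include <omp.h>

int main(void) {
    printf("cancellation enabled: %d\n", omp_get_cancellation());
    #pragma omp parallel
    {
        #pragma omp for
        for (int i = 0; i < 100; i++) {
            if (i == 3) {
                #pragma omp cancel for   /* ignored unless OMP_CANCELLATION=true */
            }
            /* threads check for a pending cancellation request here */
            #pragma omp cancellation point for
        }
    }
    return 0;
}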
update_ops_control_multi_target_single.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _USE_SIMD #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif #endif void create_shift_mask_list_from_list_and_value_buf(const UINT* array, UINT count, UINT target, UINT* dst_array, ITYPE* dst_mask); //void multi_qubit_control_single_qubit_dense_matrix_gate_old_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //void multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim); //ITYPE* create_shift_mask_list(const UINT* array, UINT count, UINT target); void create_shift_mask_list_from_list_and_value_buf(const UINT* array, UINT count, UINT target, UINT* dst_array, ITYPE* dst_mask) { UINT size = count + 1; memcpy(dst_array, array, sizeof(UINT)*count); dst_array[count] = target; sort_ui(dst_array, size); for (UINT i = 0; i < size; ++i) { dst_mask[i] = (1UL << dst_array[i]) - 1; } } void multi_qubit_control_single_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { //multi_qubit_control_single_qubit_dense_matrix_gate_old_single(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); //multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); if (control_qubit_index_count == 1) { single_qubit_control_single_qubit_dense_matrix_gate(control_qubit_index_list[0], control_value_list[0], target_qubit_index, matrix, state, dim); return; } #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { 
multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); } else { multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(control_qubit_index_list, control_value_list, control_qubit_index_count,target_qubit_index,matrix,state, dim); } #else multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); } else { multi_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); } #else multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(control_qubit_index_list, control_value_list, control_qubit_index_count, target_qubit_index, matrix, state, dim); #endif #endif } void multi_qubit_control_single_qubit_dense_matrix_gate_single_unroll(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_0+1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_0+1] = matrix[2] * cval0 + matrix[3] * cval1; } } else if (sort_array[0] == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { ITYPE state_index; for (state_index = 0; state_index < loop_dim; state_index+=2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; CTYPE cval2 = state[basis_0+1]; CTYPE cval3 = 
state[basis_1+1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; state[basis_0+1] = matrix[0] * cval2 + matrix[1] * cval3; state[basis_1+1] = matrix[2] * cval2 + matrix[3] * cval3; } } } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_parallel_unroll(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_0 + 1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_0 + 1] = matrix[2] * cval0 + matrix[3] * cval1; } } else if (sort_array[0] == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; CTYPE cval2 = state[basis_0 + 1]; CTYPE cval3 = state[basis_1 + 1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; state[basis_0 + 1] = matrix[0] * cval2 + matrix[1] * cval3; state[basis_1 + 1] = matrix[2] * cval2 + matrix[3] * cval3; } } } #endif #ifdef _USE_SIMD void multi_qubit_control_single_qubit_dense_matrix_gate_single_simd(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = 
create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; double* ptr = (double*)(state + basis_0); __m256d data = _mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else if (sort_array[0] == 0) { ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); ITYPE state_index; for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = 
(double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_parallel_simd(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; if (target_qubit_index == 0) { __m256d mv00 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[0]), cimag(matrix[0])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[2]), cimag(matrix[2])); ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; double* ptr = (double*)(state + basis_0); __m256d data = _mm256_loadu_pd(ptr); __m256d data_u0 = _mm256_mul_pd(data, mv00); __m256d data_u1 = _mm256_mul_pd(data, mv01); __m256d data_u2 = _mm256_hadd_pd(data_u0, data_u1); data_u2 = _mm256_permute4x64_pd(data_u2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_d0 = _mm256_mul_pd(data, mv20); __m256d data_d1 = _mm256_mul_pd(data, mv21); __m256d data_d2 = _mm256_hadd_pd(data_d0, data_d1); data_d2 = _mm256_permute4x64_pd(data_d2, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 __m256d data_r = _mm256_hadd_pd(data_u2, data_d2); data_r = _mm256_permute4x64_pd(data_r, 216); // (3210) -> (3120) : 1*0 + 4*2 + 16*1 + 64*3 = 216 _mm256_storeu_pd(ptr, data_r); } } else if (sort_array[0] == 0) { ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE 
cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } else { __m256d mv00 = _mm256_set_pd(-cimag(matrix[0]), creal(matrix[0]), -cimag(matrix[0]), creal(matrix[0])); __m256d mv01 = _mm256_set_pd(creal(matrix[0]), cimag(matrix[0]), creal(matrix[0]), cimag(matrix[0])); __m256d mv10 = _mm256_set_pd(-cimag(matrix[1]), creal(matrix[1]), -cimag(matrix[1]), creal(matrix[1])); __m256d mv11 = _mm256_set_pd(creal(matrix[1]), cimag(matrix[1]), creal(matrix[1]), cimag(matrix[1])); __m256d mv20 = _mm256_set_pd(-cimag(matrix[2]), creal(matrix[2]), -cimag(matrix[2]), creal(matrix[2])); __m256d mv21 = _mm256_set_pd(creal(matrix[2]), cimag(matrix[2]), creal(matrix[2]), cimag(matrix[2])); __m256d mv30 = _mm256_set_pd(-cimag(matrix[3]), creal(matrix[3]), -cimag(matrix[3]), creal(matrix[3])); __m256d mv31 = _mm256_set_pd(creal(matrix[3]), cimag(matrix[3]), creal(matrix[3]), cimag(matrix[3])); ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; double* ptr0 = (double*)(state + basis_0); double* ptr1 = (double*)(state + basis_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); __m256d data_u2 = _mm256_mul_pd(data0, mv00); __m256d data_u3 = _mm256_mul_pd(data1, mv10); __m256d data_u4 = _mm256_mul_pd(data0, mv01); __m256d data_u5 = _mm256_mul_pd(data1, mv11); __m256d data_u6 = _mm256_hadd_pd(data_u2, data_u4); __m256d data_u7 = _mm256_hadd_pd(data_u3, data_u5); __m256d data_d2 = _mm256_mul_pd(data0, mv20); __m256d data_d3 = _mm256_mul_pd(data1, mv30); __m256d data_d4 = _mm256_mul_pd(data0, mv21); __m256d data_d5 = _mm256_mul_pd(data1, mv31); __m256d data_d6 = _mm256_hadd_pd(data_d2, data_d4); __m256d data_d7 = _mm256_hadd_pd(data_d3, data_d5); __m256d data_r0 = _mm256_add_pd(data_u6, data_u7); __m256d data_r1 = _mm256_add_pd(data_d6, data_d7); _mm256_storeu_pd(ptr0, data_r0); _mm256_storeu_pd(ptr1, data_r1); } } } #endif #endif /* void multi_qubit_control_single_qubit_dense_matrix_gate_old_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // insert index list const UINT insert_index_list_count = control_qubit_index_count + 1; UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index); // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_c_t0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0 ^= control_mask; // gather target ITYPE basis_c_t1 = basis_c_t0 ^ target_mask; // fetch values CTYPE cval_c_t0 = state[basis_c_t0]; CTYPE cval_c_t1 
= state[basis_c_t1]; // set values state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1; state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1; } free(insert_index_list); } #ifdef _OPENMP void multi_qubit_control_single_qubit_dense_matrix_gate_old_parallel(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { // insert index list const UINT insert_index_list_count = control_qubit_index_count + 1; UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index); // target mask const ITYPE target_mask = 1ULL << target_qubit_index; // control mask ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); // loop variables const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_c_t0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, 1ULL << insert_index_list[cursor], insert_index_list[cursor]); } // flip controls basis_c_t0 ^= control_mask; // gather target ITYPE basis_c_t1 = basis_c_t0 ^ target_mask; // fetch values CTYPE cval_c_t0 = state[basis_c_t0]; CTYPE cval_c_t1 = state[basis_c_t1]; // set values state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1; state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1; } free(insert_index_list); } #endif ITYPE* create_shift_mask_list(const UINT* array, UINT count, UINT target) { UINT size = count + 1; UINT* new_array = (UINT*)malloc(sizeof(UINT)*size); memcpy(new_array, array, sizeof(UINT)*count); new_array[count] = target; sort_ui(new_array, size); ITYPE* mask_array = (ITYPE*)malloc(sizeof(ITYPE)*size); for (UINT i = 0; i < size; ++i) { mask_array[i] = (1UL << new_array[i]) - 1; } free(new_array); return mask_array; } void multi_qubit_control_single_qubit_dense_matrix_gate_single(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) { ITYPE* shift_masks = create_shift_mask_list(control_qubit_index_list, control_qubit_index_count, target_qubit_index); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & shift_masks[cursor]) + ((basis_0 & (~shift_masks[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 ^ target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } free(shift_masks); } void multi_qubit_control_single_qubit_dense_matrix_gate_single_stack(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE 
*state, ITYPE dim) { UINT sort_array[64]; ITYPE mask_array[64]; create_shift_mask_list_from_list_and_value_buf(control_qubit_index_list, control_qubit_index_count, target_qubit_index, sort_array, mask_array); const ITYPE target_mask = 1ULL << target_qubit_index; ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count); const UINT insert_index_list_count = control_qubit_index_count + 1; const ITYPE loop_dim = dim >> insert_index_list_count; ITYPE state_index; for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = state_index; for (UINT cursor = 0; cursor < insert_index_list_count; ++cursor) { basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1); } basis_0 += control_mask; ITYPE basis_1 = basis_0 + target_mask; // fetch values CTYPE cval0 = state[basis_0]; CTYPE cval1 = state[basis_1]; // set values state[basis_0] = matrix[0] * cval0 + matrix[1] * cval1; state[basis_1] = matrix[2] * cval0 + matrix[3] * cval1; } } */
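/* The inner loop in every variant above rebuilds a full basis index by
 * inserting a zero bit at each (sorted) control/target qubit position;
 * mask_array[cursor] is exactly the low-bits mask (1 << pos) - 1. The core
 * trick is the one-liner below, which keeps the bits under position k in
 * place and shifts the bits at and above k up by one. A standalone sketch
 * (insert_zero_bit is an illustrative name): */
#include <stdio.h>

/* insert a 0 bit at bit position k of x */
static unsigned long long insert_zero_bit(unsigned long long x, unsigned k) {
    unsigned long long low_mask = (1ULL << k) - 1;
    return (x & low_mask) + ((x & ~low_mask) << 1);
}

int main(void) {
    /* 0b1011 with a zero inserted at bit 2 -> 0b10011 (0xB -> 0x13) */
    printf("%#llx -> %#llx\n", 0xBULL, insert_zero_bit(0xBULL, 2));
    return 0;
}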
update_ops_named_X.c
#include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif #ifdef _MSC_VER #include <intrin.h> #else #include <x86intrin.h> #endif //void X_gate_old(UINT target_qubit_index, CTYPE *state, ITYPE dim); //void X_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim); //void X_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim); void X_gate(UINT target_qubit_index, CTYPE *state, ITYPE dim) { //UINT threshold = 13; //X_gate_old(target_qubit_index, state, dim); //X_gate_single(target_qubit_index, state, dim); //X_gate_single_simd(target_qubit_index, state, dim); //X_gate_single_unroll(target_qubit_index, state, dim); //X_gate_parallel(target_qubit_index, state, dim); //return; #ifdef _USE_SIMD #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { X_gate_single_simd(target_qubit_index, state, dim); } else { X_gate_parallel_simd(target_qubit_index, state, dim); } #else X_gate_single_simd(target_qubit_index, state, dim); #endif #else #ifdef _OPENMP UINT threshold = 13; if (dim < (((ITYPE)1) << threshold)) { X_gate_single_unroll(target_qubit_index, state, dim); } else { X_gate_parallel_unroll(target_qubit_index, state, dim); } #else X_gate_single_unroll(target_qubit_index, state, dim); #endif #endif } void X_gate_single_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; if (target_qubit_index == 0) { ITYPE basis_index = 0; for (basis_index = 0; basis_index < dim; basis_index += 2) { CTYPE temp = state[basis_index]; state[basis_index] = state[basis_index + 1]; state[basis_index + 1] = temp; } } else { for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp0 = state[basis_index_0]; CTYPE temp1 = state[basis_index_0+1]; state[basis_index_0] = state[basis_index_1]; state[basis_index_0+1] = state[basis_index_1+1]; state[basis_index_1] = temp0; state[basis_index_1+1] = temp1; } } } #ifdef _OPENMP void X_gate_parallel_unroll(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; if (target_qubit_index == 0) { ITYPE basis_index = 0; #pragma omp parallel for for (basis_index = 0; basis_index < dim; basis_index += 2) { CTYPE temp = state[basis_index]; state[basis_index] = state[basis_index + 1]; state[basis_index + 1] = temp; } } else { #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp0 = state[basis_index_0]; CTYPE temp1 = state[basis_index_0 + 1]; state[basis_index_0] = state[basis_index_1]; state[basis_index_0 + 1] = state[basis_index_1 + 1]; state[basis_index_1] = temp0; state[basis_index_1 + 1] = temp1; } } } #endif #ifdef _USE_SIMD void X_gate_single_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; //double* cast_state = (double*)state; if (target_qubit_index == 0) { ITYPE basis_index = 
0; for (basis_index = 0; basis_index < dim; basis_index += 2) { double* ptr = (double*)(state + basis_index); __m256d data = _mm256_loadu_pd(ptr); data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78 _mm256_storeu_pd(ptr, data); } } else { for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; double* ptr0 = (double*)(state + basis_index_0); double* ptr1 = (double*)(state + basis_index_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); _mm256_storeu_pd(ptr1, data0); _mm256_storeu_pd(ptr0, data1); } } } #ifdef _OPENMP void X_gate_parallel_simd(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; //double* cast_state = (double*)state; if (target_qubit_index == 0) { ITYPE basis_index = 0; #pragma omp parallel for for (basis_index = 0; basis_index < dim; basis_index += 2) { double* ptr = (double*)(state + basis_index); __m256d data = _mm256_loadu_pd(ptr); data = _mm256_permute4x64_pd(data, 78); // (3210) -> (1032) : 1*2 + 4*3 + 16*0 + 64*1 = 2+12+64=78 _mm256_storeu_pd(ptr, data); } } else { #pragma omp parallel for for (state_index = 0; state_index < loop_dim; state_index += 2) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; double* ptr0 = (double*)(state + basis_index_0); double* ptr1 = (double*)(state + basis_index_1); __m256d data0 = _mm256_loadu_pd(ptr0); __m256d data1 = _mm256_loadu_pd(ptr1); _mm256_storeu_pd(ptr1, data0); _mm256_storeu_pd(ptr0, data1); } } } #endif #endif /* void X_gate_old(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); ITYPE state_index; #ifdef _OPENMP //#pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = insert_zero_to_basis_index(state_index, mask, target_qubit_index); ITYPE basis_index_1 = basis_index_0 ^ mask; swap_amplitude(state, basis_index_0, basis_index_1); } } void X_gate_single(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } #ifdef _OPENMP void X_gate_parallel(UINT target_qubit_index, CTYPE *state, ITYPE dim) { const ITYPE loop_dim = dim / 2; const ITYPE mask = (1ULL << target_qubit_index); const ITYPE mask_low = mask - 1; const ITYPE mask_high = ~mask_low; ITYPE state_index = 0; #pragma omp parallel for for (state_index = 0; state_index < loop_dim; ++state_index) { ITYPE basis_index_0 = (state_index&mask_low) + ((state_index&mask_high) << 1); ITYPE basis_index_1 = basis_index_0 + mask; CTYPE temp = state[basis_index_0]; state[basis_index_0] = state[basis_index_1]; state[basis_index_1] = temp; } } #endif */
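/* Reference semantics for X_gate above: swap the amplitude pairs whose basis
 * indices differ only in the target bit. The unrolled and SIMD variants are
 * equivalent to this direct, unoptimized loop over a 2^n state vector
 * (x_gate_reference is an illustrative name, not part of this codebase): */
#include <stdio.h>
#include <complex.h>

void x_gate_reference(unsigned target, double complex *state,
                      unsigned long long dim) {
    unsigned long long mask = 1ULL << target;
    for (unsigned long long i = 0; i < dim; ++i) {
        if ((i & mask) == 0) {            /* visit each pair exactly once */
            double complex tmp = state[i];
            state[i] = state[i | mask];
            state[i | mask] = tmp;
        }
    }
}

int main(void) {
    double complex st[4] = { 1, 0, 0, 0 };  /* |00> */
    x_gate_reference(0, st, 4);             /* X on qubit 0 -> |01> */
    for (int i = 0; i < 4; ++i)
        printf("%d: %.1f\n", i, creal(st[i]));
    return 0;
}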
mm_dist_simd.c
#include <stdio.h>
#include <stdlib.h>

/*
SEQUENTIAL TIME:
real    3m30.117s
user    1m15.715s
sys     0m0.127s

MULTICORE PARALLEL TIME:
real    1m29.670s
user    1m14.390s
sys     0m0.269s

DISTRIBUTE PARALLEL TIME:
real    3m5.985s
user    2m25.092s
sys     0m40.084s

Invocations   Event Name                 Min       Max       Avg       Total
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warps_launched             1985472   1985472   1985472   1985472
==29739== Metric result:
Invocations   Metric Name                Metric Description          Min       Max       Avg
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warp_execution_efficiency  Warp Execution Efficiency   100.00%   100.00%   100.00%

DISTRIBUTE PARALLEL FOR PARALLEL TIME:
real    2m58.611s
user    0m38.440s
sys     0m9.928s

Invocations   Event Name                 Min    Max    Avg    Total
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warps_launched             2520   2520   2520   2520
==29658== Metric result:
Invocations   Metric Name                Metric Description          Min       Max       Avg
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warp_execution_efficiency  Warp Execution Efficiency   100.00%   100.00%   100.00%

GPU SIMD PARALLEL TIME:
real    0m22.656s
user    0m5.845s
sys     0m1.807s

Invocations   Event Name                 Min    Max    Avg    Total
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warps_launched             2808   2808   2808   2808
==29576== Metric result:
Invocations   Metric Name                Metric Description          Min      Max      Avg
Device "GeForce GT 1030 (0)"
    Kernel: mm$_omp_fn$0
          1   warp_execution_efficiency  Warp Execution Efficiency   86.81%   86.81%   86.81%
*/

void mm(double* a, double* b, double* c, int width)
{
    #pragma omp target map(to: a[0:width*width], b[0:width*width]) map(from: c[0:width*width])
    #pragma omp teams distribute parallel for simd
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            double sum = 0;
            for (int k = 0; k < width; k++) {
                double x = a[i * width + k];
                double y = b[k * width + j];
                sum += x * y;
            }
            c[i * width + j] = sum;
        }
    }
}

int main()
{
    int width = 2000;
    double *a = (double*) malloc(width * width * sizeof(double));
    double *b = (double*) malloc(width * width * sizeof(double));
    double *c = (double*) malloc(width * width * sizeof(double));

    #pragma omp parallel for
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            a[i*width+j] = i;
            b[i*width+j] = j;
            c[i*width+j] = 0;
        }
    }

    mm(a, b, c, width);

    /*for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            printf("%f\n", c[i*width+j]);
        }
    }*/

    free(a);
    free(b);
    free(c);
    return 0;
}
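/* With the initialization in main above (a[i][k] = i, b[k][j] = j) every dot
 * product collapses to width * i * j, which gives a cheap closed-form
 * correctness check for the offloaded kernel. A sketch under that assumption;
 * check_mm is an illustrative helper, not part of the file above, and the
 * main here exercises it on a tiny serial case: */
#include <stdio.h>
#include <math.h>

int check_mm(const double *c, int width) {
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++) {
            double expected = (double)width * i * j;  /* sum_k i*j = width*i*j */
            if (fabs(c[i * width + j] - expected) > 1e-6 * (expected + 1.0))
                return 0;  /* mismatch */
        }
    return 1;
}

int main(void) {
    enum { W = 4 };
    double c[W * W];
    /* serial reference with the same a=i, b=j inputs */
    for (int i = 0; i < W; i++)
        for (int j = 0; j < W; j++) {
            double sum = 0;
            for (int k = 0; k < W; k++) sum += (double)i * j;
            c[i * W + j] = sum;
        }
    printf("check: %s\n", check_mm(c, W) ? "ok" : "FAIL");
    return 0;
}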
pi.c
/*
 * BSD 2-Clause License
 *
 * Copyright (c) 2020, Alessandro Capotondi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * * Redistributions of source code must retain the above copyright notice, this
 *   list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file exercise7.c
 * @author Alessandro Capotondi
 * @date 27 Mar 2020
 * @brief Exercise 8
 *
 * Pi calculation
 * @see https://dolly.fim.unimore.it/2019/course/view.php?id=152
 */

#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "utils.h"

/**
 * @brief EX 8 - Pi Calculation
 *
 * This program computes pi as
 * \pi = 4 arctan(1)
 *     = 4 \int _0 ^1 \frac{1}{1 + x^2} dx
 *
 * @return void
 */
#define NSTEPS 134217728

void exercise()
{
    long i;
    double dx = 1.0 / NSTEPS;
    double pi = 0.0;
    double start_time = omp_get_wtime();

#pragma omp parallel for reduction(+ : pi)
    for (i = 0; i < NSTEPS; i++)
    {
        double x = (i + 0.5) * dx;
        pi += 1.0 / (1.0 + x * x);
    }
    pi *= 4.0 * dx;

    double run_time = omp_get_wtime() - start_time;
    double ref_pi = 4.0 * atan(1.0);
    printf("pi with %ld steps is %.10f in %.6f seconds (error=%e)\n",
           (long)NSTEPS, pi, run_time, fabs(ref_pi - pi));
}
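/* For reference, the loop above is the midpoint rule applied to the integral
 * in the doc comment:
 *
 *   pi  ~=  4 * dx * sum_{i=0}^{N-1} 1 / (1 + x_i^2),
 *   with x_i = (i + 0.5) * dx and dx = 1/N,
 *
 * where the common factor 4*dx is hoisted out of the reduction (the
 * `pi *= 4.0 * dx;` line). The midpoint rule's discretization error is
 * O(dx^2); at N = 2^27 that is around 1e-17, below the double-precision
 * rounding scale, so the printed error is dominated by floating-point
 * summation order rather than by the quadrature itself. */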
apply_bcs_curvilinear.h
// Declare boundary condition BC_UPDATE_OUTER macro, // which updates a single outer boundary face // of the 3D grid cube using quadratic polynomial // extrapolation. #define BC_UPDATE_OUTER(which_gf, i0,i1,i2, FACEX0,FACEX1,FACEX2) { \ const int idx3 = IDX3S(i0,i1,i2); \ gfs[IDX4S(which_gf,i0,i1,i2)] = \ +3.0*gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \ -3.0*gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \ +1.0*gfs[IDX4S(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \ } // Curvilinear boundary condition driver routine: Apply BCs to all six // boundary faces of the 3D numerical domain, filling in the // innermost ghost zone layer first, and moving outward. void apply_bcs_curvilinear(const paramstruct *restrict params, const bc_struct *restrict bcstruct, const int NUM_GFS, const int8_t *restrict gfs_parity, REAL *restrict gfs) { #pragma omp parallel for for(int which_gf=0;which_gf<NUM_GFS;which_gf++) { #include "RELATIVE_PATH__set_Cparameters.h" /* Header file containing correct #include for set_Cparameters.h; * accounting for the relative path */ for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) { // First apply OUTER boundary conditions, // in case an INNER (parity) boundary point // needs data at the outer boundary: // After updating each face, adjust imin[] and imax[] // to reflect the newly-updated face extents. for(int pt=0;pt<bcstruct->num_ob_gz_pts[which_gz];pt++) { BC_UPDATE_OUTER(which_gf, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i0, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i1, bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i2, bcstruct->outer[which_gz][pt].FACEi0, bcstruct->outer[which_gz][pt].FACEi1, bcstruct->outer[which_gz][pt].FACEi2); } // Then apply INNER (parity) boundary conditions: for(int pt=0;pt<bcstruct->num_ib_gz_pts[which_gz];pt++) { const int i0dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i0; const int i1dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i1; const int i2dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i2; const int i0src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i0; const int i1src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i1; const int i2src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i2; const int8_t *prty= bcstruct->inner[which_gz][pt].parity; // printf("%d\n",bcstruct->inner_bc_parity[which_gz][pt].parity[gfs_parity[which_gf]]); gfs[IDX4S(which_gf,i0dest,i1dest,i2dest)] = bcstruct->inner[which_gz][pt].parity[gfs_parity[which_gf]] * gfs[IDX4S(which_gf, i0src,i1src,i2src)]; } // END for(int pt=0;pt<num_ib_gz_pts[which_gz];pt++) } // END for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) } // END for(int which_gf=0;which_gf<NUM_GFS;which_gf++) } // END function
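/* Why the +3/-3/+1 weights in BC_UPDATE_OUTER: fitting a quadratic through
 * the three interior points one, two, and three cells inward and evaluating
 * it at the boundary cell gives f(0) = 3 f(1) - 3 f(2) + f(3), which is
 * exact for any quadratic (Lagrange extrapolation on a unit-spaced stencil).
 * A quick standalone check on an arbitrary quadratic (names illustrative): */
#include <stdio.h>

static double quad(double a, double b, double c, double x) {
    return a + b * x + c * x * x;
}

int main(void) {
    double a = 1.7, b = -0.3, c = 2.5;  /* arbitrary coefficients */
    double extrap = 3.0 * quad(a, b, c, 1.0)
                  - 3.0 * quad(a, b, c, 2.0)
                  + 1.0 * quad(a, b, c, 3.0);
    /* both values agree to rounding: the stencil reproduces f(0) exactly */
    printf("extrapolated %.12f, exact %.12f\n", extrap, quad(a, b, c, 0.0));
    return 0;
}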
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class 
FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. 
llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. 
if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method() {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute.
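/// For example (illustrative), '#pragma clang attribute push
/// (__attribute__((annotate("marker"))), apply_to = function)' records one
/// such entry, which stays in effect until the matching pop.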
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A pushed group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This /// allows us to associate a raw vector type with one of the ext_vector type /// names. This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of the translation unit. /// /// This list contains class members and the locations of delete-expressions /// that could not yet be proven to match or mismatch the new-expression used /// in the initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions.
Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. 
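/// Returns null if diagnostics are not currently being delayed.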
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before they are declared. This is rare; such an identifier /// may alias another identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before they are declared.
Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// Will hold 'respondsToSelector:'. Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields.
bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. std::unique_ptr<MangleNumberingContext> MangleNumbering; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type.
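/// (A call that turns out to be the operand of the decltype itself never
/// requires a complete return type, so the check has to wait until the
/// decltype has been fully parsed.)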
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering(), ExprContext(ExprContext) {} /// Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing.
/// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // arguments. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedButUsed - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of the translation unit. const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations.
We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the FP_CONTRACT state on entry/exit of compound /// statements. class FPContractStateRAII { public: FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {} ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; } private: Sema& S; FPOptions OldFPFeaturesState; }; void addImplicitTypedef(StringRef Name, QualType T); public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getFPOptions() { return FPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } /// Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// /// \param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway.
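// Typical usage (an illustrative sketch; 'err_example' is a hypothetical
// diagnostic ID): the builder is a temporary, and the diagnostic is emitted
// when it is destroyed at the end of the statement:
//   S.Diag(Loc, diag::err_example) << TheDecl << TheType;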
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor. // // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. 
/// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. 
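/// Subclasses capture the diagnostic's arguments up front and emit the
/// diagnostic from diagnose() only if the type actually turns out to be
/// incomplete (see BoundTypeDiagnoser below).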
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking whether their /// address is eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup.
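/// The common case (no module machinery hiding the declaration) is answered
/// inline; isVisibleSlow() consults the module visibility rules.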
bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T) { return !RequireCompleteTypeImpl(Loc, T, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... 
Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
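/// For instance (an illustrative sketch), after seeing a name followed by
/// '<', the parser can use this classification to decide whether it is
/// looking at the start of a template-id or at a comparison expression.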
enum NameClassificationKind { NC_Unknown, NC_Error, NC_Keyword, NC_Type, NC_Expression, NC_NestedNameSpecifier, NC_TypeTemplate, NC_VarTemplate, NC_FunctionTemplate, NC_UndeclaredTemplate, }; class NameClassification { NameClassificationKind Kind; ExprResult Expr; TemplateName Template; ParsedType Type; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {} NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification NestedNameSpecifier() { return NameClassification(NC_NestedNameSpecifier); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } ExprResult getExpression() const { assert(Kind == NC_Expression); return Expr; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr); /// Describes the detailed kind of a template name. Used in diagnostics. 
enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration. bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
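/// (Used, for example, to decide whether a lambda's call operator can be
/// implicitly constexpr in C++17.)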
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. 
/// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. 
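  ///
  /// For example (illustrative only), for the declaration
  /// \code
  ///   export import foo.bar;
  /// \endcode
  /// StartLoc and ExportLoc point at 'export', ImportLoc points at 'import',
  /// and Path holds {foo, bar}.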
  DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path);
  DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {});

  /// The parser has processed a module import translated from a
  /// #include or similar preprocessing directive.
  void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
  void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has entered a submodule.
  void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

  /// The parser has left a submodule.
  void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

  /// Create an implicit import of the given module at the given
  /// source location, for error recovery, if possible.
  ///
  /// This routine is typically used when an entity found by name lookup
  /// is actually hidden within a module that we know about but the user
  /// has forgotten to import.
  void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod);

  /// Kinds of missing import. Note, the values of these enumerators correspond
  /// to %select values in diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record);

  /// Common ways to introduce type names without a tag for use in diagnostics.
  /// Keep in sync with err_tag_reference_non_tag.
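  ///
  /// For instance (illustrative), given the following, the reference to 'T'
  /// with a tag specifier is diagnosed using the NTK_Typedef variant below:
  /// \code
  ///   typedef int T;
  ///   struct T *p; // error: typedef 'T' cannot be referenced with a
  ///                // 'struct' specifier
  /// \endcode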
enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. 
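  // A parser-side sketch of the usual call order (for orientation only; the
  // exact sequence is driven by the parser):
  //   ActOnTagStartDefinition(S, TagD);
  //   ...one ActOnField(...) per member...
  //   ActOnFields(S, RecLoc, TagD, Fields, LBrac, RBrac, AttrList);
  //   ActOnTagFinishDefinition(S, TagD, BraceRange);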
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. 
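  ///
  /// Expected to be paired with ActOnExitFunctionContext(); a sketch:
  /// \code
  ///   ActOnReenterFunctionContext(S, FD);
  ///   // ...process names in the function's parameter scope...
  ///   ActOnExitFunctionContext();
  /// \endcode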
  void ActOnReenterFunctionContext(Scope* S, Decl* D);
  void ActOnExitFunctionContext();

  DeclContext *getFunctionLevelDeclContext();

  /// getCurFunctionDecl - If inside of a function body, this returns a pointer
  /// to the function decl for the function being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  FunctionDecl *getCurFunctionDecl();

  /// getCurMethodDecl - If inside of a method body, this returns a pointer to
  /// the method decl for the method being parsed. If we're currently
  /// in a 'block', this returns the containing context.
  ObjCMethodDecl *getCurMethodDecl();

  /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
  /// or C function we're in, otherwise return null. If we're currently
  /// in a 'block', this returns the containing context.
  NamedDecl *getCurFunctionOrMethodDecl();

  /// Add this decl to the scope shadowed decl chains.
  void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

  /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
  /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
  /// true if 'D' belongs to the given declaration context.
  ///
  /// \param AllowInlineNamespace If \c true, allow the declaration to be in the
  ///        enclosing namespace set of the context, rather than contained
  ///        directly within it.
  bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false);

  /// Finds the scope corresponding to the given decl context, if it
  /// happens to be an enclosing scope. Otherwise return NULL.
  static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

  /// Subroutines of ActOnDeclarator().
  TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo);
  bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

  /// Describes the kind of merge to perform for availability
  /// attributes (including "deprecated", "unavailable", and "availability").
  enum AvailabilityMergeKind {
    /// Don't merge availability attributes at all.
    AMK_None,
    /// Merge availability attributes for a redeclaration, which requires
    /// an exact match.
    AMK_Redeclaration,
    /// Merge availability attributes for an override, which requires
    /// an exact match or a weakening of constraints.
    AMK_Override,
    /// Merge availability attributes for an implementation of
    /// a protocol requirement.
    AMK_ProtocolImplementation,
  };

  /// Describes the kind of priority given to an availability attribute.
  ///
  /// The sum of priorities determines the final priority of the attribute.
  /// The final priority determines how the attribute will be merged.
  /// An attribute with a lower priority will always remove higher priority
  /// attributes for the specified platform when it is being applied. An
  /// attribute with a higher priority will not be applied if the declaration
  /// already has an availability attribute with a lower priority for the
  /// specified platform. The final priority values are not expected to match
  /// the values in this enumeration, but instead should be treated as a plain
  /// integer value. This enumeration just names the priority weights that are
  /// used to calculate that final value.
  enum AvailabilityPriority : int {
    /// The availability attribute was specified explicitly next to the
    /// declaration.
    AP_Explicit = 0,
    /// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { 
AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. 
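  ///
  /// The classic case (illustrative) is binding a reference to a temporary,
  /// where the temporary's lifetime is extended to that of the reference:
  /// \code
  ///   const int &r = 42; // materialized temporary lives as long as 'r'
  /// \endcode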
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0; }; /// Perform a contextual implicit conversion. 
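  ///
  /// Callers hand in a ContextualImplicitConverter (often an
  /// ICEConvertDiagnoser) implementing the hooks declared above; a minimal
  /// sketch, with the remaining pure-virtual diagnose*()/note*() overrides
  /// elided:
  /// \code
  ///   struct BoolConverter : ContextualImplicitConverter {
  ///     bool match(QualType T) override { return T->isBooleanType(); }
  ///     // ...diagnose*() and note*() overrides go here...
  ///   };
  /// \endcode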
  ExprResult PerformContextualImplicitConversion(SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

  enum ObjCSubscriptKind {
    OS_Array,
    OS_Dictionary,
    OS_Error
  };
  ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

  // Note that LK_String is intentionally after the other literals, as
  // this is used for diagnostics logic.
  enum ObjCLiteralKind {
    LK_Array,
    LK_Dictionary,
    LK_Numeric,
    LK_Boxed,
    LK_String,
    LK_Block,
    LK_None
  };
  ObjCLiteralKind CheckLiteralKind(Expr *FromE);

  ExprResult PerformObjectMemberConversion(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, NamedDecl *Member);

  // Members have to be NamespaceDecl* or TranslationUnitDecl*.
  // TODO: make this a typesafe union.
  typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
  typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

  using ADLCallKind = CallExpr::ADLCallKind;

  void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None);
  void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false);
  void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false);
  void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None);
  void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false);
  void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
  bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {});
  void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true);
  void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true);
  void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
  void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange());
  void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0);
  void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet);
  void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false);

  // Emit as a 'note' the specific overload candidate
  void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false);

  // Emit as a series of 'note's all templates and non-templates identified by
  // the expression Expr
  void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false);

  /// Check the enable_if expressions on the given function. Returns the first
  /// failing attribute, or NULL if they were all successful.
  EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false);

  /// Find the failed Boolean condition within a given Boolean
  /// constant expression, and describe it with a string.
  std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// non-ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-dependent diagnose_if attributes should be checked each time a
  /// function is used as a direct callee of a function call.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc);

  /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
  /// ArgDependent DiagnoseIfAttrs.
  ///
  /// Argument-independent diagnose_if attributes should be checked on every use
  /// of a function.
  ///
  /// Returns true if any errors were emitted.
  bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc);

  /// Returns whether the given function's address can be taken or not,
  /// optionally emitting a diagnostic if the address can't be taken.
  ///
  /// Returns false if taking the address of the function is illegal.
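  ///
  /// For instance (illustrative), the address of a function with an
  /// argument-dependent enable_if condition cannot be taken:
  /// \code
  ///   void f(int n) __attribute__((enable_if(n > 0, "")));
  ///   void (*p)(int) = &f; // rejected; diagnosed when Complain is true
  /// \endcode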
  bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation());

  // [PossiblyAFunctionType]  -->  [Return]
  // NonFunctionType         -->  NonFunctionType
  // R (A)                   -->  R(A)
  // R (*)(A)                -->  R (A)
  // R (&)(A)                -->  R (A)
  // R (S::*)(A)             -->  R (A)
  QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

  FunctionDecl *ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr);

  FunctionDecl *resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);

  bool resolveAndFixAddressOfOnlyViableOverloadCandidate(ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

  FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr);

  bool ResolveAndFixSingleFunctionTemplateSpecialization(ExprResult &SrcExpr, bool DoFunctionPointerConversion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0);

  Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn);
  ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn);

  void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false);

  // An enum used to represent the different possible results of building a
  // range-based for loop.
  enum ForRangeStatus {
    FRS_Success,
    FRS_NoViableFunction,
    FRS_DiagnosticIssued
  };

  ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr);

  ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection = true, bool CalleesAddressIsTaken = false);

  bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result);

  ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true);

  ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true);

  ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base, Expr *Idx);

  ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);

  ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc);

  ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr);

  /// CheckCallReturnType - Checks that a call expression's return type is
  /// complete. Returns true on failure. The location passed in is the location
  /// that best represents the call.
  bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD);

  /// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. 
LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. 
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery);

public:
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloading.
  NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false);
  bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS);
  bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false);
  ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration);
  bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);

  void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions);

  LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation());

  DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
  CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals);
  CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
  CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals);
  CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals);
  CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);

  bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
  LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing);
  bool isKnownName(StringRef name);

  void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions);

  void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true);
  void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true);

  enum CorrectTypoKind {
    CTK_NonError,     // CorrectTypo used in a non-error recovery situation.
    CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
  void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList);
  void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true);
  bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList);

  void checkUnusedDeclAttributes(Declarator &D);

  /// Determine if type T is a valid subject for a nonnull and similar
  /// attributes. By default, we look through references (the behavior used by
  /// nonnull), but if the second parameter is true, then we treat a reference
  /// type as valid.
  bool isValidPointerAttrType(QualType T, bool RefOkay = false);

  bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
  bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr);
  bool CheckAttrTarget(const ParsedAttr &CurrAttr);
  bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
  bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr);
  bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
  bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
  bool checkMSInheritanceAttrOnDefinition(CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling);
  void CheckAlignasUnderalignment(Decl *D);

  /// Adjust the calling convention of a method to be the ABI default if it
  /// wasn't specified explicitly. This handles method types formed from
  /// function type typedefs and typename template arguments.
  void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc);

  // Check if there is an explicit attribute, but only look through parens.
  // The intent is to look for an attribute on the current declarator, but not
  // one that came from a typedef.
  bool hasExplicitCallingConv(QualType T);

  /// Get the outermost AttributedType node that sets a calling convention.
  /// Valid types should not have multiple attributes with different CCs.
  const AttributedType *getCallingConvAttributedType(QualType T) const;

  /// Stmt attributes - this routine is the top level dispatcher.
  StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range);

  void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);
  void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl);

  /// WarnExactTypedMethods - This routine issues a warning if a method
  /// implementation declaration matches exactly that of its declaration.
  void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl);

  typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

  /// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
  void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc);

  /// ImplMethodsVsClassMethods - This is the main routine to warn if any method
  /// remains unimplemented in the class or category \@implementation.
  void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false);

  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
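  ///
  /// For example (illustrative): a property declared in an adopted protocol
  /// is not auto-synthesized, so an \@implementation that neither synthesizes
  /// it nor defines its accessors is diagnosed here.
  /// \code
  ///   @protocol P
  ///   @property int value;
  ///   @end
  ///   @interface C <P> @end
  ///   @implementation C @end // warning: accessors for 'value' not found
  /// \endcode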
  /// DiagnoseUnimplementedProperties - This routine warns on those properties
  /// which must be implemented by this implementation.
  void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl *CDecl,
                                       bool SynthesizeProperties);

  /// Diagnose any null-resettable synthesized setters.
  void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

  /// DefaultSynthesizeProperties - This routine default synthesizes all
  /// properties which must be synthesized in the class's \@implementation.
  void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                   ObjCInterfaceDecl *IDecl,
                                   SourceLocation AtEnd);
  void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

  /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
  /// an ivar synthesized for 'Method' and 'Method' is a property accessor
  /// declared in class 'IFace'.
  bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                      ObjCMethodDecl *Method,
                                      ObjCIvarDecl *IV);

  /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
  /// ivar which backs a property is not used in the property's accessor.
  void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                           const ObjCImplementationDecl *ImplD);

  /// GetIvarBackingPropertyAccessor - If the method is a property
  /// setter/getter and its property has a backing ivar, returns this ivar;
  /// otherwise, returns NULL. It also returns the ivar's property on success.
  ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                        const ObjCPropertyDecl *&PDecl) const;

  /// Called by ActOnProperty to handle \@property declarations in
  /// class extensions.
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                   SourceLocation AtLoc,
                                                   SourceLocation LParenLoc,
                                                   FieldDeclarator &FD,
                                                   Selector GetterSel,
                                                   SourceLocation GetterNameLoc,
                                                   Selector SetterSel,
                                                   SourceLocation SetterNameLoc,
                                                   const bool isReadWrite,
                                                   unsigned &Attributes,
                                                   const unsigned AttributesAsWritten,
                                                   QualType T,
                                                   TypeSourceInfo *TSI,
                                                   tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjCPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       SourceLocation GetterNameLoc,
                                       Selector SetterSel,
                                       SourceLocation SetterNameLoc,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when an atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCInterfaceDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  enum MethodMatchStrategy { MMS_loose, MMS_strict };

  /// MatchTwoMethodDeclarations - Checks if two methods' types match and
  /// returns true or false accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);
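  // Editorial sketch (not part of upstream Clang): querying the ivar behind a
  // property accessor via GetIvarBackingPropertyAccessor above; 'S' and
  // 'Method' are hypothetical caller-side names.
  //
  //   const ObjCPropertyDecl *PDecl = nullptr;
  //   if (ObjCIvarDecl *IV = S.GetIvarBackingPropertyAccessor(Method, PDecl)) {
  //     // 'IV' backs the accessor; 'PDecl' is the corresponding property.
  //   }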
  /// MatchAllMethodDeclarations - Check methods declared in an interface or
  /// protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and warns each
  /// time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See the description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// - Returns instance or factory methods in the global method pool for the
  /// given selector. It checks the desired kind first; if none is found and
  /// the \p CheckTheOther parameter is set, it then checks the other kind. If
  /// no such method or only one method is found, the function returns false;
  /// otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass,
                                      SmallVectorImpl<ObjCMethodDecl*>& Methods);

  void DiagnoseMultipleMethodInGlobalPool(
      SmallVectorImpl<ObjCMethodDecl*> &Methods, Selector Sel, SourceRange R,
      bool receiverIdOrClass);

private:
  /// - Returns the method that best matches the given argument list, or
  /// nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods);

  /// Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
  /// global pool.
  void AddAnyMethodToGlobalPool(Decl *D);
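  // Editorial sketch (not part of upstream Clang): collecting every
  // global-pool candidate for a selector with
  // CollectMultipleMethodsInGlobalPool above; 'S', 'Sel', and 'R' are
  // hypothetical caller-side names.
  //
  //   SmallVector<ObjCMethodDecl *, 4> Methods;
  //   if (S.CollectMultipleMethodsInGlobalPool(Sel, Methods,
  //                                            /*InstanceFirst=*/true,
  //                                            /*CheckTheOther=*/true))
  //     S.DiagnoseMultipleMethodInGlobalPool(Methods, Sel, R,
  //                                          /*receiverIdOrClass=*/false);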
  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                              QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
  void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                         SmallVectorImpl<ObjCIvarDecl*> &Ivars);

  //===--------------------------------------------------------------------===//
  // Statement Parsing Callbacks: SemaStmt.cpp.
public:
  class FullExprArg {
  public:
    FullExprArg() : E(nullptr) { }
    FullExprArg(Sema &actions) : E(nullptr) { }

    ExprResult release() {
      return E;
    }

    Expr *get() const { return E; }

    Expr *operator->() {
      return E;
    }

  private:
    // FIXME: No need to make the entire Sema class a friend when it's just
    // Sema::MakeFullExpr that needs access to the constructor below.
    friend class Sema;

    explicit FullExprArg(Expr *expr) : E(expr) {}

    Expr *E;
  };

  FullExprArg MakeFullExpr(Expr *Arg) {
    return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
  }
  FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
    return FullExprArg(
        ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
  }
  FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
    ExprResult FE =
        ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                            /*DiscardedValue*/ true);
    return FullExprArg(FE.get());
  }

  StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
  StmtResult ActOnExprStmtError();

  StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                           bool HasLeadingEmptyMacro = false);

  void ActOnStartOfCompoundStmt(bool IsStmtExpr);
  void ActOnFinishOfCompoundStmt();
  StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                               ArrayRef<Stmt *> Elts, bool isStmtExpr);

  /// An RAII object to enter the scope of a compound statement.
  class CompoundScopeRAII {
  public:
    CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
      S.ActOnStartOfCompoundStmt(IsStmtExpr);
    }

    ~CompoundScopeRAII() {
      S.ActOnFinishOfCompoundStmt();
    }

  private:
    Sema &S;
  };
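  // Editorial sketch (not part of upstream Clang): callers typically bracket
  // compound-statement processing with the RAII helper above instead of
  // pairing ActOnStartOfCompoundStmt/ActOnFinishOfCompoundStmt by hand;
  // 'SemaRef' is a hypothetical Sema reference.
  //
  //   {
  //     Sema::CompoundScopeRAII CompoundScope(SemaRef);
  //     // ... act on the statements of the block ...
  //   } // ActOnFinishOfCompoundStmt() runs here, even on early exit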
  /// An RAII helper that pops a function scope on exit.
  struct FunctionScopeRAII {
    Sema &S;
    bool Active;
    FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
    ~FunctionScopeRAII() {
      if (Active)
        S.PopFunctionScopeInfo();
    }
    void disable() { Active = false; }
  };

  StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                           SourceLocation EndLoc);
  void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
  StmtResult ActOnForEachLValueExpr(Expr *E);

  ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
  StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                           SourceLocation DotDotDotLoc, ExprResult RHS,
                           SourceLocation ColonLoc);
  void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

  StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                              SourceLocation ColonLoc,
                              Stmt *SubStmt, Scope *CurScope);
  StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                            SourceLocation ColonLoc, Stmt *SubStmt);

  StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                                 ArrayRef<const Attr*> Attrs,
                                 Stmt *SubStmt);

  class ConditionResult;
  StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                         Stmt *InitStmt,
                         ConditionResult Cond, Stmt *ThenVal,
                         SourceLocation ElseLoc, Stmt *ElseVal);
  StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                    Stmt *InitStmt,
                                    ConditionResult Cond);
  StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                   Stmt *Switch, Stmt *Body);
  StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                            Stmt *Body);
  StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                         SourceLocation WhileLoc, SourceLocation CondLParen,
                         Expr *Cond, SourceLocation CondRParen);

  StmtResult ActOnForStmt(SourceLocation ForLoc,
                          SourceLocation LParenLoc,
                          Stmt *First,
                          ConditionResult Second,
                          FullExprArg Third,
                          SourceLocation RParenLoc,
                          Stmt *Body);
  ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                           Expr *collection);
  StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                        Stmt *First, Expr *collection,
                                        SourceLocation RParenLoc);
  StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

  enum BuildForRangeKind {
    /// Initial building of a for-range statement.
    BFRK_Build,
    /// Instantiation or recovery rebuild of a for-range statement. Don't
    /// attempt any typo-correction.
    BFRK_Rebuild,
    /// Determining whether a for-range statement could be built. Avoid any
    /// unnecessary or irreversible actions.
BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); 
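  // Editorial sketch (not part of upstream Clang): the
  // CopyElisionSemanticsKind values above are bit flags, so callers pick a
  // policy by OR-ing them or by using one of the preset combinations;
  // 'S', 'ReturnType', and 'E' are hypothetical caller-side names.
  //
  //   VarDecl *NRVOCandidate =
  //       S.getCopyElisionCandidate(ReturnType, E, Sema::CES_Default);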
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass, bool ObjCPropertyAccess, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReceiver = nullptr); bool makeUnavailableInSystemHeader(SourceLocation loc, UnavailableAttr::ImplicitReason reason); /// Issue any -Wunguarded-availability warnings in \c FD void DiagnoseUnguardedAvailabilityViolations(Decl *FD); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). 
  void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
  void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
                              bool MightBeOdrUse = true);
  void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
  void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
  void MarkMemberReferenced(MemberExpr *E);
  void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
  void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
                                         unsigned CapturingScopeIndex);

  ExprResult CheckLValueToRValueConversionOperand(Expr *E);
  void CleanupVarDeclMarking();

  enum TryCaptureKind {
    TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
  };

  /// Try to capture the given variable.
  ///
  /// \param Var The variable to capture.
  ///
  /// \param Loc The location at which the capture occurs.
  ///
  /// \param Kind The kind of capture, which may be implicit (for either a
  /// block or a lambda), or explicit by-value or by-reference (for a lambda).
  ///
  /// \param EllipsisLoc The location of the ellipsis, if one is provided in
  /// an explicit lambda capture.
  ///
  /// \param BuildAndDiagnose Whether we are actually supposed to add the
  /// captures or diagnose errors. If false, this routine merely checks
  /// whether the capture can occur without performing the capture itself or
  /// complaining if the variable cannot be captured.
  ///
  /// \param CaptureType Will be set to the type of the field used to capture
  /// this variable in the innermost block or lambda. Only valid when the
  /// variable can be captured.
  ///
  /// \param DeclRefType Will be set to the type of a reference to the capture
  /// from within the current scope. Only valid when the variable can be
  /// captured.
  ///
  /// \param FunctionScopeIndexToStopAt If non-null, it points to the index
  /// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
  /// This is useful when enclosing lambdas must speculatively capture
  /// variables that may or may not be used in certain specializations of
  /// a nested generic lambda.
  ///
  /// \returns true if an error occurred (i.e., the variable cannot be
  /// captured) and false if the capture succeeded.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind, SourceLocation EllipsisLoc,
                          bool BuildAndDiagnose, QualType &CaptureType,
                          QualType &DeclRefType,
                          const unsigned *const FunctionScopeIndexToStopAt);

  /// Try to capture the given variable.
  bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                          TryCaptureKind Kind = TryCapture_Implicit,
                          SourceLocation EllipsisLoc = SourceLocation());

  /// Checks if the variable must be captured.
  bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

  /// Given a variable, determine the type that a reference to that
  /// variable will have in the given scope.
  QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

  /// Mark all of the declarations referenced within a particular AST node as
  /// referenced. Used when template instantiation instantiates a non-dependent
  /// type -- entities referenced by the type are now referenced.
  void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
  void MarkDeclarationsReferencedInExpr(Expr *E,
                                        bool SkipLocalVariables = false);
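  // Editorial sketch (not part of upstream Clang): an implicit capture
  // attempt using the convenience overload of tryCaptureVariable above;
  // 'S', 'Var', and 'Loc' are hypothetical caller-side names.
  //
  //   if (S.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit))
  //     return true; // the variable cannot be captured; error emitted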
  /// Try to recover by turning the given expression into a
  /// call. Returns true if recovery was attempted or an error was
  /// emitted; this may also leave the ExprResult invalid.
  bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                            bool ForceComplain = false,
                            bool (*IsPlausibleResult)(QualType) = nullptr);

  /// Figure out if an expression could be turned into a call.
  bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                     UnresolvedSetImpl &NonTemplateOverloads);

  /// Conditionally issue a diagnostic based on the current
  /// evaluation context.
  ///
  /// \param Statement If Statement is non-null, delay reporting the
  /// diagnostic until the function body is parsed, and then do a basic
  /// reachability analysis to determine if the statement is reachable.
  /// If it is unreachable, the diagnostic will not be emitted.
  bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                           const PartialDiagnostic &PD);
  /// Similar, but diagnostic is only produced if all the specified statements
  /// are reachable.
  bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
                           const PartialDiagnostic &PD);

  // Primary Expressions.
  SourceRange getExprRange(Expr *E) const;

  ExprResult ActOnIdExpression(
      Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
      UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
      CorrectionCandidateCallback *CCC = nullptr,
      bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

  void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                              TemplateArgumentListInfo &Buffer,
                              DeclarationNameInfo &NameInfo,
                              const TemplateArgumentListInfo *&TemplateArgs);

  bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                           CorrectionCandidateCallback &CCC,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           ArrayRef<Expr *> Args = None,
                           TypoExpr **Out = nullptr);

  ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                                IdentifierInfo *II,
                                bool AllowBuiltinCreation=false);

  ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                        SourceLocation TemplateKWLoc,
                                        const DeclarationNameInfo &NameInfo,
                                        bool isAddressOfOperand,
                                const TemplateArgumentListInfo *TemplateArgs);

  /// If \p D cannot be odr-used in the current expression evaluation context,
  /// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
  ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr);
  ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                           MultiExprArg ArgExprs, SourceLocation RParenLoc,
                           Expr *ExecConfig = nullptr,
                           bool IsExecConfig = false);
  ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                   SourceLocation LParenLoc,
                                   ArrayRef<Expr *> Arg,
                                   SourceLocation RParenLoc,
                                   Expr *Config = nullptr,
                                   bool IsExecConfig = false,
                                   ADLCallKind UsesADL = ADLCallKind::NotADL);

  ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                     MultiExprArg ExecConfig,
                                     SourceLocation GGGLoc);

  ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                           Declarator &D, ParsedType &Ty,
                           SourceLocation RParenLoc, Expr *CastExpr);
  ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                                 TypeSourceInfo *Ty,
                                 SourceLocation RParenLoc,
                                 Expr *Op);
  CastKind PrepareScalarCast(ExprResult &src, QualType destType);

  /// Build an altivec or OpenCL literal.
  ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                                SourceLocation RParenLoc, Expr *E,
                                TypeSourceInfo *TInfo);

  ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

  ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                  ParsedType Ty,
                                  SourceLocation RParenLoc,
                                  Expr *InitExpr);

  ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                      TypeSourceInfo *TInfo,
                                      SourceLocation RParenLoc,
                                      Expr *LiteralExpr);

  ExprResult ActOnInitList(SourceLocation LBraceLoc,
                           MultiExprArg InitArgList,
                           SourceLocation RBraceLoc);

  ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                        SourceLocation Loc,
                                        bool GNUSyntax,
                                        ExprResult Init);

private:
  static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
  ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                        tok::TokenKind Kind,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                        BinaryOperatorKind Opc,
                        Expr *LHSExpr, Expr *RHSExpr);
  ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                Expr *LHSExpr, Expr *RHSExpr);
  void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);

  /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
  /// in the case of the GNU conditional expr extension.
  ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                                SourceLocation ColonLoc,
                                Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

  /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
  ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                            LabelDecl *TheDecl);

  void ActOnStartStmtExpr();
  ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                           SourceLocation RPLoc); // "({..})"
  // Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
  ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                    SourceLocation BuiltinLoc,
                                    SourceLocation RParenLoc);

  //===---------------------------- OpenCL Features -----------------------===//

  /// __builtin_astype(...)
  ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
                             SourceLocation BuiltinLoc,
                             SourceLocation RParenLoc);

  //===---------------------------- C++ Features --------------------------===//

  // Act on C++ namespaces
  Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
                               SourceLocation NamespaceLoc,
                               SourceLocation IdentLoc, IdentifierInfo *Ident,
                               SourceLocation LBrace,
                               const ParsedAttributesView &AttrList,
                               UsingDirectiveDecl *&UsingDecl);
  void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);

  NamespaceDecl *getStdNamespace() const;
  NamespaceDecl *getOrCreateStdNamespace();

  NamespaceDecl *lookupStdExperimentalNamespace();

  CXXRecordDecl *getStdBadAlloc() const;
  EnumDecl *getStdAlignValT() const;

private:
  // A cache representing if we've fully checked the various comparison
  // category types stored in ASTContext. The bit-index corresponds to the
  // integer value of a ComparisonCategoryType enumerator.
  llvm::SmallBitVector FullyCheckedComparisonCategories;

  ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
                                         CXXScopeSpec &SS,
                                         ParsedType TemplateTypeTy,
                                         IdentifierInfo *MemberOrBase);

public:
  /// Lookup the specified comparison category types in the standard
  /// library, and check the VarDecls possibly returned by the operator<=>
  /// builtins for that type.
  ///
  /// \return The type of the comparison category type corresponding to the
  /// specified Kind, or a null type if an error occurs.
  QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                       SourceLocation Loc);

  /// Tests whether Ty is an instance of std::initializer_list and, if
  /// it is and Element is not NULL, assigns the element type to Element.
  bool isStdInitializerList(QualType Ty, QualType *Element);

  /// Looks for the std::initializer_list template and instantiates it
  /// with Element, or emits an error if it's not found.
  ///
  /// \returns The instantiated template, or null on error.
  QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
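  // Editorial sketch (not part of upstream Clang): detecting and rebuilding
  // std::initializer_list<T> with the helpers above; 'S', 'Ty', and 'Loc'
  // are hypothetical caller-side names.
  //
  //   QualType Element;
  //   if (S.isStdInitializerList(Ty, &Element)) {
  //     // 'Ty' is std::initializer_list<Element>; e.g. re-instantiate it:
  //     QualType Rebuilt = S.BuildStdInitializerList(Element, Loc);
  //   }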
  /// Determine whether Ctor is an initializer-list constructor, as
  /// defined in [dcl.init.list]p2.
  bool isInitListConstructor(const FunctionDecl *Ctor);

  Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                            SourceLocation NamespcLoc, CXXScopeSpec &SS,
                            SourceLocation IdentLoc,
                            IdentifierInfo *NamespcName,
                            const ParsedAttributesView &AttrList);
  void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

  Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                               SourceLocation AliasLoc, IdentifierInfo *Alias,
                               CXXScopeSpec &SS, SourceLocation IdentLoc,
                               IdentifierInfo *Ident);

  void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
  bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                            const LookupResult &PreviousDecls,
                            UsingShadowDecl *&PrevShadow);
  UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                        NamedDecl *Target,
                                        UsingShadowDecl *PrevDecl);

  bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                   bool HasTypenameKeyword,
                                   const CXXScopeSpec &SS,
                                   SourceLocation NameLoc,
                                   const LookupResult &Previous);
  bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
                               bool HasTypename,
                               const CXXScopeSpec &SS,
                               const DeclarationNameInfo &NameInfo,
                               SourceLocation NameLoc);

  NamedDecl *BuildUsingDeclaration(
      Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
      bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
      DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
      const ParsedAttributesView &AttrList, bool IsInstantiation);
  NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                                ArrayRef<NamedDecl *> Expansions);

  bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

  /// Given a derived-class using shadow declaration for a constructor and the
  /// corresponding base class constructor, find or create the implicit
  /// synthesized derived class constructor to use for this initialization.
  CXXConstructorDecl *
  findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                            ConstructorUsingShadowDecl *DerivedShadow);

  Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                              SourceLocation UsingLoc,
                              SourceLocation TypenameLoc, CXXScopeSpec &SS,
                              UnqualifiedId &Name, SourceLocation EllipsisLoc,
                              const ParsedAttributesView &AttrList);
  Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                              MultiTemplateParamsArg TemplateParams,
                              SourceLocation UsingLoc, UnqualifiedId &Name,
                              const ParsedAttributesView &AttrList,
                              TypeResult Type, Decl *DeclFromDeclSpec);

  /// BuildCXXConstructExpr - Creates a complete call to a constructor,
  /// including handling of its default argument expressions.
  ///
  /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  /// Build a CXXConstructExpr whose constructor has already been resolved if
  /// it denotes an inherited constructor.
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);
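  // Editorial sketch (not part of upstream Clang): a caller initializing a
  // complete object might invoke the first BuildCXXConstructExpr overload
  // above roughly as follows; all variable names are hypothetical.
  //
  //   ExprResult Construct = S.BuildCXXConstructExpr(
  //       Loc, DeclInitType, FoundDecl, Ctor, Args,
  //       /*HadMultipleCandidates=*/false, /*IsListInitialization=*/false,
  //       /*IsStdInitListInitialization=*/false, /*RequiresZeroInit=*/false,
  //       CXXConstructExpr::CK_Complete, SourceRange());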
  // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
  // if the constructor can be elidable?
  ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc,
                                   QualType DeclInitType,
                                   NamedDecl *FoundDecl,
                                   CXXConstructorDecl *Constructor,
                                   bool Elidable,
                                   MultiExprArg Exprs,
                                   bool HadMultipleCandidates,
                                   bool IsListInitialization,
                                   bool IsStdInitListInitialization,
                                   bool RequiresZeroInit,
                                   unsigned ConstructKind,
                                   SourceRange ParenRange);

  ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

  /// Instantiate or parse a C++ default argument expression as necessary.
  /// Return true on error.
  bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
                              ParmVarDecl *Param);

  /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
  /// the default expr if needed.
  ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                    FunctionDecl *FD,
                                    ParmVarDecl *Param);

  /// FinalizeVarWithDestructor - Prepare for calling destructor on the
  /// constructed variable.
  void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

  /// Helper class that collects exception specifications for
  /// implicitly-declared special member functions.
  class ImplicitExceptionSpecification {
    // Pointer to allow copying
    Sema *Self;
    // We order exception specifications thus:
    // noexcept is the most restrictive, but is only used in C++11.
    // throw() comes next.
    // Then a throw(collected exceptions)
    // Finally no specification, which is expressed as noexcept(false).
    // throw(...) is used instead if any called function uses it.
    ExceptionSpecificationType ComputedEST;
    llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
    SmallVector<QualType, 4> Exceptions;

    void ClearExceptions() {
      ExceptionsSeen.clear();
      Exceptions.clear();
    }

  public:
    explicit ImplicitExceptionSpecification(Sema &Self)
        : Self(&Self), ComputedEST(EST_BasicNoexcept) {
      if (!Self.getLangOpts().CPlusPlus11)
        ComputedEST = EST_DynamicNone;
    }

    /// Get the computed exception specification type.
    ExceptionSpecificationType getExceptionSpecType() const {
      assert(!isComputedNoexcept(ComputedEST) &&
             "noexcept(expr) should not be a possible result");
      return ComputedEST;
    }

    /// The number of exceptions in the exception specification.
    unsigned size() const { return Exceptions.size(); }

    /// The set of exceptions in the exception specification.
    const QualType *data() const { return Exceptions.data(); }

    /// Integrate another called method into the collected data.
    void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

    /// Integrate an invoked expression into the collected data.
    void CalledExpr(Expr *E);

    /// Overwrite an EPI's exception specification with this
    /// computed exception specification.
    FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
      FunctionProtoType::ExceptionSpecInfo ESI;
      ESI.Type = getExceptionSpecType();
      if (ESI.Type == EST_Dynamic) {
        ESI.Exceptions = Exceptions;
      } else if (ESI.Type == EST_None) {
        /// C++11 [except.spec]p14:
        ///   The exception-specification is noexcept(false) if the set of
        ///   potential exceptions of the special member function contains
        ///   "any"
        ESI.Type = EST_NoexceptFalse;
        ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                     tok::kw_false).get();
      }
      return ESI;
    }
  };

  /// Determine what sort of exception specification a defaulted
  /// default constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                           CXXMethodDecl *MD);
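  // Editorial sketch (not part of upstream Clang): the intended use of
  // ImplicitExceptionSpecification above is to fold in every subobject
  // operation the implicit member would invoke, then read off the result;
  // 'S', 'Loc', and 'FieldCtor' are hypothetical names.
  //
  //   Sema::ImplicitExceptionSpecification Spec(S);
  //   Spec.CalledDecl(Loc, FieldCtor); // once per base/member operation
  //   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();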
  /// Determine what sort of exception specification a defaulted
  /// copy constructor of a class will have, and whether the parameter
  /// will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// copy assignment operator of a class will have, and whether the
  /// parameter will be const.
  ImplicitExceptionSpecification
  ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted move
  /// assignment operator of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification a defaulted
  /// destructor of a class will have.
  ImplicitExceptionSpecification
  ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

  /// Determine what sort of exception specification an inheriting
  /// constructor of a class will have.
  ImplicitExceptionSpecification
  ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
                                     CXXConstructorDecl *CD);

  /// Evaluate the implicit exception specification for a defaulted
  /// special member function.
  void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

  /// Check the given noexcept-specifier, convert its expression, and compute
  /// the appropriate ExceptionSpecificationType.
  ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
                               ExceptionSpecificationType &EST);

  /// Check the given exception-specification and update the
  /// exception specification information with the results.
  void checkExceptionSpecification(bool IsTopLevel,
                                   ExceptionSpecificationType EST,
                                   ArrayRef<ParsedType> DynamicExceptions,
                                   ArrayRef<SourceRange> DynamicExceptionRanges,
                                   Expr *NoexceptExpr,
                                   SmallVectorImpl<QualType> &Exceptions,
                                   FunctionProtoType::ExceptionSpecInfo &ESI);

  /// Determine if we're in a case where we need to (incorrectly) eagerly
  /// parse an exception specification to work around a libstdc++ bug.
  bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

  /// Add an exception-specification to the given member function
  /// (or member function template). The exception-specification was parsed
  /// after the method itself was declared.
  void actOnDelayedExceptionSpecification(Decl *Method,
         ExceptionSpecificationType EST,
         SourceRange SpecificationRange,
         ArrayRef<ParsedType> DynamicExceptions,
         ArrayRef<SourceRange> DynamicExceptionRanges,
         Expr *NoexceptExpr);

  class InheritedConstructorInfo;

  /// Determine if a special member function should have a deleted
  /// definition when it is defaulted.
  bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                                 InheritedConstructorInfo *ICI = nullptr,
                                 bool Diagnose = false);

  /// Declare the implicit default constructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// default constructor will be added.
  ///
  /// \returns The implicitly-declared default constructor.
  CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                     CXXRecordDecl *ClassDecl);

  /// DefineImplicitDefaultConstructor - Checks for feasibility of
  /// defining this constructor as the default constructor.
  void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                        CXXConstructorDecl *Constructor);

  /// Declare the implicit destructor for the given class.
  ///
  /// \param ClassDecl The class declaration into which the implicit
  /// destructor will be added.
/// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Check whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed-in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed-in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ).
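/// For illustration, a C++17 fold-expression of the form this entry
/// point handles:
/// \code
/// template <typename... Ts>
/// auto sum(Ts... ts) { return (ts + ... + 0); }
/// \endcode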
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); /// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true on failure, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); /// ActOnCXXThrow - Parse throw expressions.
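/// For illustration, the forms of throw expression parsed here:
/// \code
/// throw 42;   // throw with an operand
/// throw;      // rethrow the current exception
/// \endcode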
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
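/// For illustration (a sketch of the scoping described by
/// AllocationFunctionScope; the names below are hypothetical): a
/// class-scope allocation function is found via AFS_Class or AFS_Both,
/// while a '::new' expression considers only the global scope:
/// \code
/// #include <cstddef>
/// struct S {
///   static void *operator new(std::size_t);  // class-scope allocator
///   static void operator delete(void *);
/// };
/// S *a = new S;    // prefers S::operator new
/// S *b = ::new S;  // considers only ::operator new
/// \endcode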
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression. ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with an ExprWithCleanups node. /// Otherwise, just returns the passed-in expression.
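/// For illustration, a full-expression whose temporary requires a
/// cleanup (a sketch; whether an ExprWithCleanups node is built is an
/// implementation detail):
/// \code
/// struct Guard { ~Guard(); };
/// void take(Guard);
/// void g() { take(Guard{}); }  // Guard's destructor runs when the
///                              // full-expression ends
/// \endcode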
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case, do not emit an error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed to by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer.
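/// For illustration, the 'N::M::' below is the kind of
/// nested-name-specifier these annotations save and restore:
/// \code
/// namespace N { namespace M { struct X {}; } }
/// N::M::X x;
/// \endcode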
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl * startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Optional<std::pair<unsigned, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. 
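/// For illustration, a C++14 init-capture:
/// \code
/// auto make_adder(int n) {
///   return [m = n + 1](int x) { return x + m; };  // init-capture 'm'
/// }
/// \endcode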
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
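/// For illustration (assuming an Objective-C++ translation unit with
/// blocks enabled, which is where block pointers exist):
/// \code
/// int (^blk)(int) = [](int x) { return x + 1; };
/// \endcode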
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void 
ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. 
The caller must /// ensure that referenceDLLExportedClassMethods is called at some point later /// when all outer classes of Class are complete. void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class); void referenceDLLExportedClassMethods(); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); /// Check that the C++ class annotated with "trivial_abi" satisfies all the /// conditions that are needed for the attribute to have an effect. void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD); void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXNonNestedClass(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckDeductionGuideDeclarator(Declarator &D, QualType &R, StorageClass &SC); void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier. CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void
ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of the base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4.
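/// For illustration, the violation this check diagnoses:
/// \code
/// struct B { virtual void f() final; };
/// struct D : B { void f(); };  // error: D::f overrides 'final' B::f
/// \endcode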
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
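/// For illustration, a C++17 deduction-guide declaration ('Box' here is
/// a hypothetical class template):
/// \code
/// template <typename T> struct Box { Box(T); };
/// Box(int) -> Box<long>;  // deduction guide
/// Box b{1};               // deduces Box<long>
/// \endcode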
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
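/// For illustration (a sketch in a dependent context):
/// \code
/// template <typename T> void use() {
///   class T::template apply<int> *p = nullptr;
/// }
/// \endcode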
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, ConceptDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, 
unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations.
/// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
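/// For illustration, a typename-specifier ending in a template-id, used
/// in an alias template:
/// \code
/// template <typename MetaFun, typename T1, typename T2>
/// using Apply = typename MetaFun::template apply<T1, T2>;
/// \endcode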
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                               const CXXScopeSpec &SS,
                               SourceLocation TemplateLoc,
                               TemplateTy TemplateName,
                               IdentifierInfo *TemplateII,
                               SourceLocation TemplateIILoc,
                               SourceLocation LAngleLoc,
                               ASTTemplateArgsPtr TemplateArgs,
                               SourceLocation RAngleLoc);

  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc);

  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  bool RebuildTemplateParamsInCurrentInstantiation(
                                                TemplateParameterList *Params);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);

  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);

  // Concepts
  Decl *ActOnConceptDefinition(Scope *S,
                               MultiTemplateParamsArg TemplateParameterLists,
                               IdentifierInfo *Name, SourceLocation NameLoc,
                               Expr *ConstraintExpr);

  //===--------------------------------------------------------------------===//
  // C++ Variadic Templates (C++0x [temp.variadic])
  //===--------------------------------------------------------------------===//

  /// Determine whether an unexpanded parameter pack might be permitted in this
  /// location. Useful for error recovery.
  bool isUnexpandedParameterPackPermitted();

  /// The context in which an unexpanded parameter pack is
  /// being diagnosed.
  ///
  /// Note that the values of this enumeration line up with the first
  /// argument to the \c err_unexpanded_parameter_pack diagnostic.
  enum UnexpandedParameterPackContext {
    /// An arbitrary expression.
    UPPC_Expression = 0,
    /// The base type of a class type.
    UPPC_BaseType,
    /// The type of an arbitrary declaration.
    UPPC_DeclarationType,
    /// The type of a data member.
    UPPC_DataMemberType,
    /// The size of a bit-field.
    UPPC_BitFieldWidth,
    /// The expression in a static assertion.
    UPPC_StaticAssertExpression,
    /// The fixed underlying type of an enumeration.
    UPPC_FixedUnderlyingType,
    /// The enumerator value.
    UPPC_EnumeratorValue,
    /// A using declaration.
    UPPC_UsingDeclaration,
    /// A friend declaration.
    UPPC_FriendDeclaration,
    /// A declaration qualifier.
    UPPC_DeclarationQualifier,
    /// An initializer.
    UPPC_Initializer,
    /// A default argument.
    UPPC_DefaultArgument,
    /// The type of a non-type template parameter.
    UPPC_NonTypeTemplateParameterType,
    /// The type of an exception.
    UPPC_ExceptionType,
    /// Partial specialization.
    UPPC_PartialSpecialization,
    /// Microsoft __if_exists.
    UPPC_IfExists,
    /// Microsoft __if_not_exists.
    UPPC_IfNotExists,
    /// Lambda expression.
    UPPC_Lambda,
    /// Block expression.
    UPPC_Block
  };

  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                        ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
/// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
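///
  /// For example, in the parameter of the (illustrative) declaration below,
  /// the pack 'Ts' occurs unexpanded inside the nested-name-specifier
  /// 'Outer<Ts>::' and would be collected when that specifier is traversed;
  /// 'Outer' is a hypothetical template used only for this sketch:
  /// \code
  /// template<typename ...Ts> void f(typename Outer<Ts>::type ...ps);
  /// \endcode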
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. 
/// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
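///
  /// For example, deduction for the call below fails with TDK_Inconsistent,
  /// because the two arguments would deduce different types for \c T:
  /// \code
  /// template<typename T> void f(T, T);
  /// void g() { f(1, 2.0); } // T deduced as both 'int' and 'double'
  /// \endcode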
enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };

  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
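///
  /// For example, for the (illustrative) call below, deduction records that
  /// the argument '42' of type 'int' was matched against the parameter type
  /// 'const T &', so the argument conversion can be rechecked once \c T is
  /// known:
  /// \code
  /// template<typename T> void f(const T &);
  /// void g() { f(42); }
  /// \endcode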
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
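///
  /// For example, class template argument deduction for the declaration of
  /// \c p below requires the implicitly declared guides for \c pair:
  /// \code
  /// template<typename T> struct pair { pair(T, T); };
  /// pair p(1, 2); // deduces pair<int>
  /// \endcode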
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. 
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
      /// provides the template arguments as specified.
      DefaultFunctionArgumentInstantiation,

      /// We are substituting explicit template arguments provided for
      /// a function template. The entity is a FunctionTemplateDecl.
      ExplicitTemplateArgumentSubstitution,

      /// We are substituting template arguments determined as part of
      /// template argument deduction for either a class template
      /// partial specialization or a function template. The
      /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
      /// a TemplateDecl.
      DeducedTemplateArgumentSubstitution,

      /// We are substituting prior template arguments into a new
      /// template parameter. The template parameter itself is either a
      /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
      PriorTemplateArgumentSubstitution,

      /// We are checking the validity of a default template argument that
      /// has been used when naming a template-id.
      DefaultTemplateArgumentChecking,

      /// We are computing the exception specification for a defaulted special
      /// member function.
      ExceptionSpecEvaluation,

      /// We are instantiating the exception specification for a function
      /// template which was deferred until it was needed.
      ExceptionSpecInstantiation,

      /// We are declaring an implicit special member function.
      DeclaringSpecialMember,

      /// We are defining a synthesized function (such as a defaulted special
      /// member).
      DefiningSynthesizedFunction,

      /// Added for Template instantiation observation.
      /// Memoization means we are _not_ instantiating a template because
      /// it is already instantiated (but we entered a context where we
      /// would have had to if it was not already instantiated).
      Memoization
    } Kind;

    /// Was the enclosing context a non-instantiation SFINAE context?
    bool SavedInNonInstantiationSFINAEContext;

    /// The point of instantiation or synthesis within the source code.
    SourceLocation PointOfInstantiation;

    /// The entity that is being synthesized.
    Decl *Entity;

    /// The template (or partial specialization) in which we are
    /// performing the instantiation, for substitutions of prior template
    /// arguments.
    NamedDecl *Template;

    /// The list of template arguments we are substituting, if they
    /// are not part of the entity.
    const TemplateArgument *TemplateArgs;

    // FIXME: Wrap this union around more members, or perhaps store the
    // kind-specific members in the RAII object owning the context.
    union {
      /// The number of template arguments in TemplateArgs.
      unsigned NumTemplateArgs;

      /// The special member being declared or defined.
      CXXSpecialMember SpecialMember;
    };

    ArrayRef<TemplateArgument> template_arguments() const {
      assert(Kind != DeclaringSpecialMember);
      return {TemplateArgs, NumTemplateArgs};
    }

    /// The template deduction info object associated with the
    /// substitution or checking of explicit or deduced template arguments.
    sema::TemplateDeductionInfo *DeductionInfo;

    /// The source range that covers the construct that caused
    /// the instantiation, e.g., the template-id that causes a class
    /// template instantiation.
    SourceRange InstantiationRange;

    CodeSynthesisContext()
        : Kind(TemplateInstantiation),
          SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
          Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
          DeductionInfo(nullptr) {}

    /// Determines whether this template is an actual instantiation
    /// that should be counted toward the maximum instantiation depth.
    bool isInstantiationRecord() const;
  };

  /// List of active code synthesis contexts.
///
  /// This vector is treated as a stack. As synthesis of one entity requires
  /// synthesis of another, additional contexts are pushed onto the stack.
  SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

  /// Specializations whose definitions are currently being instantiated.
  llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

  /// Non-dependent types used in templates that have already been instantiated
  /// by some template instantiation.
  llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

  /// Extra modules inspected when performing a lookup during a template
  /// instantiation. Computed lazily.
  SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

  /// Cache of additional modules that should be used for name lookup
  /// within the current template instantiation. Computed lazily; use
  /// getLookupModules() to get a complete set.
  llvm::DenseSet<Module*> LookupModulesCache;

  /// Get the set of additional modules that should be checked during
  /// name lookup. A module and its imports become visible when instantiating a
  /// template defined within it.
  llvm::DenseSet<Module*> &getLookupModules();

  /// Map from the most recent declaration of a namespace to the most
  /// recent visible declaration of that namespace.
  llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

  /// Whether we are in a SFINAE context that is not associated with
  /// template instantiation.
  ///
  /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
  /// of a template instantiation or template argument deduction.
  bool InNonInstantiationSFINAEContext;

  /// The number of \p CodeSynthesisContexts that are not template
  /// instantiations and, therefore, should not be counted as part of the
  /// instantiation depth.
  ///
  /// When the instantiation depth reaches the user-configurable limit
  /// \p LangOptions::InstantiationDepth we will abort instantiation.
  // FIXME: Should we have a similar limit for other forms of synthesis?
  unsigned NonInstantiationEntries;

  /// The depth of the context stack at the point when the most recent
  /// error or warning was produced.
  ///
  /// This value is used to suppress printing of redundant context stacks
  /// when there are multiple errors or warnings in the same instantiation.
  // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
  unsigned LastEmittedCodeSynthesisContextDepth = 0;

  /// The template instantiation callbacks to trace or track
  /// instantiations (objects can be chained).
  ///
  /// These callbacks are used to print, trace, or track template
  /// instantiations as they are being constructed.
  std::vector<std::unique_ptr<TemplateInstantiationCallback>>
      TemplateInstCallbacks;

  /// The current index into pack expansion arguments that will be
  /// used for substitution of parameter packs.
  ///
  /// The pack expansion index will be -1 to indicate that parameter packs
  /// should be instantiated as themselves. Otherwise, the index specifies
  /// which argument within the parameter pack will be used for substitution.
  int ArgumentPackSubstitutionIndex;

  /// RAII object used to change the argument pack substitution index
  /// within a \c Sema object.
  ///
  /// See \c ArgumentPackSubstitutionIndex for more information.
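///
  /// A typical use, substituting the I'th argument of each expanded pack
  /// (a sketch only; the surrounding expansion logic is elided):
  /// \code
  /// for (unsigned I = 0; I != *NumExpansions; ++I) {
  ///   Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
  ///   // ... substitute the pattern; packs now map to their I'th argument
  /// }
  /// \endcode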
class ArgumentPackSubstitutionIndexRAII {
    Sema &Self;
    int OldSubstitutionIndex;

  public:
    ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
        : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
      Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
    }

    ~ArgumentPackSubstitutionIndexRAII() {
      Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
    }
  };

  friend class ArgumentPackSubstitutionIndexRAII;

  /// For each declaration that involved template argument deduction, the
  /// set of diagnostics that were suppressed during that template argument
  /// deduction.
  ///
  /// FIXME: Serialize this structure to the AST file.
  typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
  SuppressedDiagnosticsMap SuppressedDiagnostics;

  /// A stack object to be created when performing template
  /// instantiation.
  ///
  /// Construction of an object of type \c InstantiatingTemplate
  /// pushes the current instantiation onto the stack of active
  /// instantiations. If the size of this stack exceeds the maximum
  /// number of recursive template instantiations, construction
  /// produces an error and \c isInvalid() returns true.
  ///
  /// Destruction of this object will pop the named instantiation off
  /// the stack.
  struct InstantiatingTemplate {
    /// Note that we are instantiating a class template,
    /// function template, variable template, alias template,
    /// or a member thereof.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          Decl *Entity,
                          SourceRange InstantiationRange = SourceRange());

    struct ExceptionSpecification {};
    /// Note that we are instantiating an exception specification
    /// of a function template.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionDecl *Entity, ExceptionSpecification,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating a default argument in a
    /// template-id.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateParameter Param, TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are substituting either explicitly-specified or
    /// deduced template arguments during function template argument deduction.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          FunctionTemplateDecl *FunctionTemplate,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          CodeSynthesisContext::SynthesisKind Kind,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template declaration.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          TemplateDecl *Template,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a class template partial
    /// specialization.
    InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                          ClassTemplatePartialSpecializationDecl *PartialSpec,
                          ArrayRef<TemplateArgument> TemplateArgs,
                          sema::TemplateDeductionInfo &DeductionInfo,
                          SourceRange InstantiationRange = SourceRange());

    /// Note that we are instantiating as part of template
    /// argument deduction for a variable template partial
    /// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. 
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. bool isUnevaluatedContext() const { assert(!ExprEvalContexts.empty() && "Must be in an expression evaluation context"); return ExprEvalContexts.back().isUnevaluated(); } /// RAII class used to determine whether SFINAE has /// trapped any errors that occur during template argument /// deduction. class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; bool PrevLastDiagnosticIgnored; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE), PrevLastDiagnosticIgnored( SemaRef.getDiagnostics().isLastDiagnosticIgnored()) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; SemaRef.getDiagnostics().setLastDiagnosticIgnored( PrevLastDiagnosticIgnored); } /// Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// The number of typos corrected by CorrectTypo. unsigned TyposCorrected; typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet; typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations; /// A cache containing identifiers for which typo correction failed and /// their locations, so that repeated attempts to correct an identifier in a /// given location are ignored if typo correction already failed for it. IdentifierSourceLocations TypoCorrectionFailures; /// Worker object for performing CFG-based warnings. sema::AnalysisBasedWarnings AnalysisWarnings; threadSafety::BeforeSet *ThreadSafetyDeclCache; /// An entity for which implicit template instantiation is required. /// /// The source location associated with the declaration is the first place in /// the source code where the declaration was "used". 
It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation);
  /// however, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
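///
    /// A sketch of the intended use, where 'Proto' (a FunctionProtoType),
    /// 'EPI' (a FunctionProtoType::ExtProtoInfo), and 'NumParams' are
    /// assumed from the surrounding context for illustration:
    /// \code
    /// Sema::ExtParameterInfoBuilder ParamInfos;
    /// ParamInfos.set(0, Proto->getExtParameterInfo(0));
    /// EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
    /// \endcode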
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. 
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl 
*Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); VarDecl *getVarTemplateSpecialization( VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs, const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };
  ObjCContainerKind getObjCContainerKind() const;

  DeclResult actOnObjCTypeParam(Scope *S,
                                ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc,
                                unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc,
                                ParsedType typeBound);

  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);
  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  Decl *ActOnStartClassInterface(
      Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *SuperName, SourceLocation SuperLoc,
      ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  void ActOnSuperClassOfClassInterface(Scope *S,
                                       SourceLocation AtInterfaceLoc,
                                       ObjCInterfaceDecl *IDecl,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *SuperName,
                                       SourceLocation SuperLoc,
                                       ArrayRef<ParsedType> SuperTypeArgs,
                                       SourceRange SuperTypeArgsRange);

  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               SmallVectorImpl<SourceLocation> &ProtocolLocs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  Decl *ActOnCompatibilityAlias(SourceLocation AtCompatibilityAliasLoc,
                                IdentifierInfo *AliasName,
                                SourceLocation AliasLocation,
                                IdentifierInfo *ClassName,
                                SourceLocation ClassLocation);

  bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

  Decl *ActOnStartProtocolInterface(
      SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
      SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
      unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
      SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryInterface(
      SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
      SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
      IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
      Decl *const *ProtoRefs, unsigned NumProtoRefs,
      const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
      const ParsedAttributesView &AttrList);

  Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                      IdentifierInfo *ClassName,
                                      SourceLocation ClassLoc,
                                      IdentifierInfo *SuperClassname,
                                      SourceLocation SuperClassLoc,
                                      const ParsedAttributesView &AttrList);

  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc,
                                         const ParsedAttributesView &AttrList);

  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                   IdentifierInfo **IdentList,
                   SourceLocation *IdentLocs,
                   ArrayRef<ObjCTypeParamList *> TypeParamLists,
                   unsigned NumElts);

  DeclGroupPtrTy
  ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
                                  ArrayRef<IdentifierLocPair> IdentList,
                                  const ParsedAttributesView &attrList);

  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               ArrayRef<IdentifierLocPair> ProtocolId,
                               SmallVectorImpl<Decl *> &Protocols);

  void DiagnoseTypeArgsAndProtocols(IdentifierInfo
*ProtocolId, SourceLocation ProtocolLoc,
                                    IdentifierInfo *TypeArgId,
                                    SourceLocation TypeArgLoc,
                                    bool SelectProtocolFirst = false);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
         Scope *S,
         ParsedType baseType,
         SourceLocation lAngleLoc,
         ArrayRef<IdentifierInfo *> identifiers,
         ArrayRef<SourceLocation> identifierLocs,
         SourceLocation rAngleLoc,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SourceLocation &protocolRAngleLoc,
         bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
               SourceLocation lAngleLoc,
               ArrayRef<Decl *> protocols,
               ArrayRef<SourceLocation> protocolLocs,
               SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
               Scope *S,
               SourceLocation Loc,
               ParsedType BaseType,
               SourceLocation TypeArgsLAngleLoc,
               ArrayRef<ParsedType> TypeArgs,
               SourceLocation TypeArgsRAngleLoc,
               SourceLocation ProtocolLAngleLoc,
               ArrayRef<Decl *> Protocols,
               ArrayRef<SourceLocation> ProtocolLocs,
               SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C type parameter type.
  QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
                                  SourceLocation ProtocolLAngleLoc,
                                  ArrayRef<ObjCProtocolDecl *> Protocols,
                                  ArrayRef<SourceLocation> ProtocolLocs,
                                  SourceLocation ProtocolRAngleLoc,
                                  bool FailOnError = false);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType,
                               SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Ensure attributes are consistent with type.
  ///
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
  void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
    ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name,
    bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
    ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
    ArrayRef<Decl *> allMethods = None,
    ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc,
    FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel,
    Selector SetterSel, tok::ObjCKeywordKind MethodImplKind,
    DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc,
    SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId,
    IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc,
    ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
    unsigned CNumArgs, // c-style args
    const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
    const ObjCObjectPointerType *OPT, bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
    bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
    Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName,
    SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType,
    bool Super);
ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
    IdentifierInfo &propertyName, SourceLocation receiverNameLoc,
    SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name,
    SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot,
    ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel,
    SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs,
    SourceLocation RBracLoc, MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
    QualType ReceiverType, SourceLocation SuperLoc, Selector Sel,
    ObjCMethodDecl *Method, SourceLocation LBracLoc,
    ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc,
    MultiExprArg Args, bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
    bool isSuperReceiver, SourceLocation Loc, Selector Sel,
    ObjCMethodDecl *Method, MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel,
    SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs,
    SourceLocation RBracLoc, MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType,
    SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method,
    SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs,
    SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType,
    SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method,
    MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel,
    SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs,
    SourceLocation RBracLoc, MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
    ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc,
    TypeSourceInfo *TSInfo, Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc,
    ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc,
    ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
    CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType,
    QualType SrcType, ObjCInterfaceDecl *&RelatedClass,
    ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod,
    TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType,
    QualType SrcType, Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
    bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
    const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
    ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
    PragmaClangSectionAction Action, PragmaClangSectionKind SecKind,
    StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
    SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
    StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
  NonDefaultStateAtInclude,
  ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
    SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
    StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
    SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName, int SectionFlags,
    DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
    SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
    PragmaMsStackAction Action, llvm::StringRef StackSlotLabel,
    StringLiteral *SegmentName, llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
    StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
    StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
    StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
    SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
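/// As a quick orientation, this is the surface syntax being tracked here;
/// a hypothetical snippet (illustrative only, all names are made up):
/// \code
///   #pragma GCC visibility push(hidden)
///   void helper();                     // helper() gets hidden visibility
///   #pragma GCC visibility pop
/// \endcode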
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
    SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
    SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc,
    SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
    IdentifierInfo* AliasName, SourceLocation PragmaLoc,
    SourceLocation WeakNameLoc, SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName,
    SourceLocation PragmaLoc, SourceLocation WeakNameLoc,
    SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
    SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
    SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
    const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
    const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
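/// For illustration, a hypothetical use of the pragma whose state this
/// getter reports (illustrative only; the function name is made up):
/// \code
///   #pragma clang optimize off
///   void debug_me();                   // compiled without optimizations
///   #pragma clang optimize on          // from here the state is "on" again
/// \endcode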
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
    unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
    unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
    unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
    unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
    unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
    Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
    unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
    unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
    RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D,
    Expr *Min, Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
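/// The corresponding source-level spelling, as a hypothetical example
/// (illustrative only; the function name is made up):
/// \code
///   __attribute__((amdgpu_waves_per_eu(2, 4)))  // min 2, max 4 waves per EU
///   void compute_kernel();
/// \endcode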
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
    Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
    StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
    bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
    UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
    bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
    SourceLocation FuncLoc);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled.
/// If so, emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
    OpenMPClauseKind CKind, bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
    unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
    MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange());
public:
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
    unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
    ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
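/// For example, in the hypothetical snippet below (illustrative only),
/// 'x' is used in a 'private' clause:
/// \code
///   int x = 0;
///   #pragma omp parallel for private(x)
///   for (int i = 0; i < 100; ++i)
///     x = i;                           // each thread works on its own 'x'
/// \endcode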
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
    Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
    const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
    const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
    SourceLocation Loc, ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
    ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
    ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses,
    DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
    ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
    ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
    TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
    VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
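/// The construct being completed looks like this hypothetical example
/// (illustrative only; 'add' is a made-up reduction identifier):
/// \code
///   #pragma omp declare reduction(add : double : omp_out += omp_in) initializer(omp_priv = 0.0)
/// \endcode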
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
    TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
    Scope *S, QualType MapperType, SourceLocation StartLoc,
    DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D,
    Scope *S, ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
    const DeclarationNameInfo &Id, OMPDeclareTargetDeclAttr::MapTypeTy MT,
    NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
    SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
using VarsWithInheritedDSAType =
    llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
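/// A hypothetical worksharing loop of the kind handled here (illustrative
/// only; assumes an enclosing parallel region, and 'a', 'b', 'n' are
/// made-up names):
/// \code
///   #pragma omp for nowait
///   for (int i = 0; i < n; ++i)
///     a[i] += b[i];
/// \endcode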
StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
    SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
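/// A hypothetical use of the construct (illustrative only; compute() and
/// emit() are made-up helpers):
/// \code
///   #pragma omp for ordered
///   for (int i = 0; i < n; ++i) {
///     compute(i);                      // may run out of order
///     #pragma omp ordered
///     emit(i);                         // runs in iteration order
///   }
/// \endcode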
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
    SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
    SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
    SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
    SourceLocation StartLoc, SourceLocation EndLoc,
    OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
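/// For example (hypothetical, illustrative only; 'result' is a made-up
/// variable):
/// \code
///   #pragma omp target update from(result)  // copy 'result' back to host
/// \endcode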
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
    SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc,
    VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
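/// A hypothetical instance of this combined construct (illustrative only;
/// 'a' and 'n' are made-up names):
/// \code
///   #pragma omp target teams distribute parallel for map(tofrom: a[0:n])
///   for (int i = 0; i < n; ++i)
///     a[i] *= 2.0;
/// \endcode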
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
    SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
    OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
    Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation NameModifierLoc, SourceLocation ColonLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
    SourceLocation LParenLoc = SourceLocation(),
    Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
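/// For example (hypothetical, illustrative only; work() is a made-up
/// helper):
/// \code
///   #pragma omp taskloop num_tasks(8)  // split the iterations into 8 tasks
///   for (int i = 0; i < n; ++i)
///     work(i);
/// \endcode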
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
    SourceLocation ArgumentLoc, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
    SourceLocation KindLoc, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
    SourceLocation KindLoc, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
    OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
    OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
    OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
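/// For example (hypothetical, illustrative only):
/// \code
///   #pragma omp requires dynamic_allocators
/// \endcode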
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
    OpenMPLinearClauseKind LinKind,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc,
    OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
    SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation ColonLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc,
    SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc,
    SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc,
    SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
    SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
    Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
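/// For example (hypothetical, illustrative only; read_seed() is a made-up
/// helper):
/// \code
///   #pragma omp single copyprivate(seed)
///   seed = read_seed();                // broadcast to all other threads
/// \endcode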
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
    SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc,
    CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId,
    OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
    SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
    const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
    OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
    const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
    const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
    ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr,
    CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
    bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
    const FunctionProtoType *Proto, Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for the various
/// forms of call prototypes.
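/// The variadic case relies on the default argument promotions; a
/// hypothetical caller (illustrative only):
/// \code
///   int printf(const char *, ...);
///   float f = 0.5f;
///   printf("%f\n", f);                 // 'f' is promoted to double
/// \endcode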
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types, which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this.
IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright; it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression.
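// A minimal sketch, assuming a parameter type ArgType and an argument RHS
// from the surrounding argument-passing check:
//
//   if (ArgType->getAsUnionType()) // only transparent unions qualify
//     ConvTy = CheckTransparentUnionArgumentConstraints(ArgType, RHS);
//
// Any result other than Compatible is then reported through
// DiagnoseAssignmentResult above.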
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
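// A minimal sketch: for a simple assignment `a = b` pass a null converted
// type, while for a compound assignment `a += b` pass the converted type
// computed by the arithmetic checks (LHSExpr, RHS and Loc are assumed):
//
//   QualType Result = CheckAssignmentOperands(LHSExpr, RHS, Loc,
//                                             /*CompoundType=*/QualType());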
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. 
// Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened.
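/// A minimal sketch from return-statement checking (ConvTy, ReturnLoc,
/// FnRetType, RetValType and RetValExp are assumed):
///
///   if (DiagnoseAssignmentResult(ConvTy, ReturnLoc, FnRetType, RetValType,
///                                RetValExp, AA_Returning))
///     EmitRelatedResultTypeNoteForReturn(FnRetType);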
void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as a condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed. struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// A partial call graph maintained during CUDA/OpenMP device code compilation /// to support deferred diagnostics. /// /// Functions are only added here if, at the time they're considered, they are /// not known-emitted. As soon as we discover that a function is /// known-emitted, we remove it and everything it transitively calls from this /// set and add those functions to DeviceKnownEmittedFns.
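/// For example, after a call from f() to g() at location Loc is seen while
/// neither function is yet known-emitted, the map conceptually holds
///
///   DeviceCallGraph[f] == { (g, Loc) }
///
/// and the edge disappears again once f (and hence g) is marked
/// known-emitted.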
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>, /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>, SourceLocation>> DeviceCallGraph; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Indicate that this function (and thus everything it transitively calls) /// will be codegen'ed, and emit any deferred diagnostics on this function and /// its (transitive) callees. void markKnownEmitted( Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee, SourceLocation OrigLoc, const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. 
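/// A minimal sketch (Caller and Callee assumed):
///
///   bool Allowed = IsAllowedCUDACall(Caller, Callee);
///   // Allowed is false only for CFP_Never combinations; per the note
///   // above, CFP_WrongSide calls still return true here.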
bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas declared inside __device__ or __global__ functions inherit /// the __device__ attribute. Similarly, lambdas inside __host__ __device__ /// functions become __host__ __device__ themselves. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error, emits an appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD.
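/// A minimal sketch, assuming FD was just instantiated from a function
/// template:
///
///   if (FunctionTemplateDecl *TD = FD->getPrimaryTemplate())
///     inheritCUDATargetAttrs(FD, *TD);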
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. 
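/// A minimal sketch, as it might be driven from the parser while an
/// argument list is still being typed (Actions, Fn, ArgExprs and
/// OpenParLoc are assumed):
///
///   QualType PreferredType = Actions.ProduceCallSignatureHelp(
///       getCurScope(), Fn, ArgExprs, OpenParLoc);
///   // A null QualType means no preferred type could be computed.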
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteAfterIf(Scope *S); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void 
CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
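// A minimal sketch of the template-instantiation use mentioned above,
// assuming a TreeTransform-style rebuild of a __builtin_shufflevector call:
//
//   ExprResult Res = SemaRef.SemaBuiltinShuffleVector(TheCall);
//   if (Res.isInvalid())
//     return ExprError();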
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. 
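/// A minimal sketch, assuming a newly declared field FD of class RD:
///
///   CheckShadowInheritedFields(Loc, FD->getDeclName(), RD);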
void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains a 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether the receiver is a mutable ObjC container which /// attempts to add itself into the container. void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation.
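/// A minimal sketch of the safe pattern: only code reached directly from
/// the parser may do
///
///   Scope *S = getCurScope();
///
/// whereas template substitution or instantiation must not, since the
/// parser's scope no longer matches the semantic context there.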
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// To be used for checking whether the number of arguments being passed to /// a function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; private: class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedDllExportClasses.empty() && "there shouldn't be any pending delayed DLL export classes"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; decltype(DelayedDllExportClasses) SavedDllExportClasses; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); SavedDllExportClasses.swap(S.DelayedDllExportClasses); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the set of potentially /// misaligned members and is being converted to some pointer type T with lower /// or equal alignment requirements.
If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. Decl *D; }; } // end namespace clang namespace llvm { // Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its // SourceLocation. 
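// This specialization is what allows containers such as
// Sema::LocsWithCUDACallDiags above to key on FunctionDeclAndLoc, e.g.:
//
//   llvm::DenseSet<clang::Sema::FunctionDeclAndLoc> Seen;
//   Seen.insert({FD, Loc}); // FD and Loc are hashed together, as below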
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> { using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc; using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>; static FunctionDeclAndLoc getEmptyKey() { return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()}; } static FunctionDeclAndLoc getTombstoneKey() { return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()}; } static unsigned getHashValue(const FunctionDeclAndLoc &FDL) { return hash_combine(FDBaseInfo::getHashValue(FDL.FD), FDL.Loc.getRawEncoding()); } static bool isEqual(const FunctionDeclAndLoc &LHS, const FunctionDeclAndLoc &RHS) { return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc; } }; } // namespace llvm #endif
fx.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % FFFFF X X % % F X X % % FFF X % % F X X % % F X X % % % % % % MagickCore Image Special Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/fx-private.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. 
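  The byte codes below stand in for fx's multi-character operators:
  AcquireFxInfo() rewrites each two-character operator to the corresponding
  single byte so the expression evaluator can dispatch on one character at
  a time.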
*/ #define LeftShiftOperator 0xf5U #define RightShiftOperator 0xf6U #define LessThanEqualOperator 0xf7U #define GreaterThanEqualOperator 0xf8U #define EqualOperator 0xf9U #define NotEqualOperator 0xfaU #define LogicalAndOperator 0xfbU #define LogicalOrOperator 0xfcU #define ExponentialNotation 0xfdU struct _FxInfo { const Image *images; char *expression; FILE *file; SplayTreeInfo *colors, *symbols; CacheView **view; RandomInfo *random_info; ExceptionInfo *exception; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireFxInfo() allocates the FxInfo structure. % % The format of the AcquireFxInfo method is: % % FxInfo *AcquireFxInfo(Image *images,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o expression: the expression. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression, ExceptionInfo *exception) { char fx_op[2]; const Image *next; FxInfo *fx_info; register ssize_t i; fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info)); if (fx_info == (FxInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info)); fx_info->exception=AcquireExceptionInfo(); fx_info->images=images; fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory, RelinquishMagickMemory); fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength( fx_info->images),sizeof(*fx_info->view)); if (fx_info->view == (CacheView **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); i=0; next=GetFirstImageInList(fx_info->images); for ( ; next != (Image *) NULL; next=next->next) { fx_info->view[i]=AcquireVirtualCacheView(next,exception); i++; } fx_info->random_info=AcquireRandomInfo(); fx_info->expression=ConstantString(expression); fx_info->file=stderr; (void) SubstituteString(&fx_info->expression," ",""); /* compact string */ /* Force right-to-left associativity for unary negation. */ (void) SubstituteString(&fx_info->expression,"-","-1.0*"); (void) SubstituteString(&fx_info->expression,"^-1.0*","^-"); (void) SubstituteString(&fx_info->expression,"E-1.0*","E-"); (void) SubstituteString(&fx_info->expression,"e-1.0*","e-"); /* Convert compound to simple operators. 
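  A worked example: after these substitutions the expression "u<=0.5||u>0.9"
  is stored as "u" 0xf7 "0.5" 0xfc "u>0.9"; each two-character operator
  collapses to the one-byte code defined above.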
*/ fx_op[1]='\0'; *fx_op=(char) LeftShiftOperator; (void) SubstituteString(&fx_info->expression,"<<",fx_op); *fx_op=(char) RightShiftOperator; (void) SubstituteString(&fx_info->expression,">>",fx_op); *fx_op=(char) LessThanEqualOperator; (void) SubstituteString(&fx_info->expression,"<=",fx_op); *fx_op=(char) GreaterThanEqualOperator; (void) SubstituteString(&fx_info->expression,">=",fx_op); *fx_op=(char) EqualOperator; (void) SubstituteString(&fx_info->expression,"==",fx_op); *fx_op=(char) NotEqualOperator; (void) SubstituteString(&fx_info->expression,"!=",fx_op); *fx_op=(char) LogicalAndOperator; (void) SubstituteString(&fx_info->expression,"&&",fx_op); *fx_op=(char) LogicalOrOperator; (void) SubstituteString(&fx_info->expression,"||",fx_op); *fx_op=(char) ExponentialNotation; (void) SubstituteString(&fx_info->expression,"**",fx_op); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d d N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AddNoiseImage() adds random noise to the image. % % The format of the AddNoiseImage method is: % % Image *AddNoiseImage(const Image *image,const NoiseType noise_type, % const double attenuate,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o noise_type: The type of noise: Uniform, Gaussian, Multiplicative, % Impulse, Laplacian, or Poisson. % % o attenuate: attenuate the random distribution. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type, const double attenuate,ExceptionInfo *exception) { #define AddNoiseImageTag "AddNoise/Image" CacheView *image_view, *noise_view; Image *noise_image; MagickBooleanType status; MagickOffsetType progress; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateAddNoiseImage(image,noise_type,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } /* Add noise in each row.
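  Rows are independent, so the loop below parallelizes across rows with
  OpenMP; the magick_threads() guard enables threading only when the random
  secret key is the default (~0UL), presumably so seeded runs stay
  reproducible.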
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireVirtualCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,noise_image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel); if ((traits == UndefinedPixelTrait) || (noise_traits == UndefinedPixelTrait)) continue; if (((noise_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) == 0)) { SetPixelChannel(noise_image,channel,p[i],q); continue; } SetPixelChannel(noise_image,channel,ClampToQuantum( GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)), q); } p+=GetPixelChannels(image); q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AddNoiseImage) #endif proceed=SetImageProgress(image,AddNoiseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u e S h i f t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlueShiftImage() mutes the colors of the image to simulate a scene at % nighttime in the moonlight. % % The format of the BlueShiftImage method is: % % Image *BlueShiftImage(const Image *image,const double factor, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o factor: the shift factor. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *BlueShiftImage(const Image *image,const double factor, ExceptionInfo *exception) { #define BlueShiftImageTag "BlueShift/Image" CacheView *image_view, *shift_view; Image *shift_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; /* Allocate blue shift image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (shift_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse) { shift_image=DestroyImage(shift_image); return((Image *) NULL); } /* Blue-shift DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); shift_view=AcquireAuthenticCacheView(shift_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,shift_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; PixelInfo pixel; Quantum quantum; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) < quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) < quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(GetPixelRed(image,p)+factor*quantum); pixel.green=0.5*(GetPixelGreen(image,p)+factor*quantum); pixel.blue=0.5*(GetPixelBlue(image,p)+factor*quantum); quantum=GetPixelRed(image,p); if (GetPixelGreen(image,p) > quantum) quantum=GetPixelGreen(image,p); if (GetPixelBlue(image,p) > quantum) quantum=GetPixelBlue(image,p); pixel.red=0.5*(pixel.red+factor*quantum); pixel.green=0.5*(pixel.green+factor*quantum); pixel.blue=0.5*(pixel.blue+factor*quantum); SetPixelRed(shift_image,ClampToQuantum(pixel.red),q); SetPixelGreen(shift_image,ClampToQuantum(pixel.green),q); SetPixelBlue(shift_image,ClampToQuantum(pixel.blue),q); p+=GetPixelChannels(image); q+=GetPixelChannels(shift_image); } sync=SyncCacheViewAuthenticPixels(shift_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_BlueShiftImage) #endif proceed=SetImageProgress(image,BlueShiftImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); shift_view=DestroyCacheView(shift_view); if (status == MagickFalse) shift_image=DestroyImage(shift_image); return(shift_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a r c o a l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CharcoalImage() creates a new image that is a copy of an existing one with % the edge highlighted. It allocates the memory necessary for the new Image % structure and returns a pointer to the new image. 
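%
% A hypothetical usage sketch (image and exception are assumed to exist;
% radius 0.0 lets the underlying blur derive its support from sigma):
%
%   charcoal_image=CharcoalImage(image,0.0,1.0,exception);
%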
% % The format of the CharcoalImage method is: % % Image *CharcoalImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CharcoalImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { Image *charcoal_image, *clone_image, *edge_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); edge_image=EdgeImage(clone_image,radius,exception); clone_image=DestroyImage(clone_image); if (edge_image == (Image *) NULL) return((Image *) NULL); charcoal_image=BlurImage(edge_image,radius,sigma,exception); edge_image=DestroyImage(edge_image); if (charcoal_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(charcoal_image,exception); (void) NegateImage(charcoal_image,MagickFalse,exception); (void) GrayscaleImage(charcoal_image,image->intensity,exception); return(charcoal_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorizeImage() blends the fill color with each pixel in the image. % A percentage blend is specified with opacity. Control the application % of different color components by specifying a different percentage for % each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue). % % The format of the ColorizeImage method is: % % Image *ColorizeImage(const Image *image,const char *blend, % const PixelInfo *colorize,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A character string indicating the level of blending as a % percentage. % % o colorize: A color value. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ColorizeImage(const Image *image,const char *blend, const PixelInfo *colorize,ExceptionInfo *exception) { #define ColorizeImageTag "Colorize/Image" #define Colorize(pixel,blend_percentage,colorize) \ (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0) CacheView *image_view; GeometryInfo geometry_info; Image *colorize_image; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; PixelInfo blend_percentage; ssize_t y; /* Allocate colorized image. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); colorize_image=CloneImage(image,0,0,MagickTrue,exception); if (colorize_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse) { colorize_image=DestroyImage(colorize_image); return((Image *) NULL); } if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) || (IsPixelInfoGray(colorize) != MagickFalse)) (void) SetImageColorspace(colorize_image,sRGBColorspace,exception); if ((colorize_image->alpha_trait == UndefinedPixelTrait) && (colorize->alpha_trait != UndefinedPixelTrait)) (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception); if (blend == (const char *) NULL) return(colorize_image); GetPixelInfo(colorize_image,&blend_percentage); flags=ParseGeometry(blend,&geometry_info); blend_percentage.red=geometry_info.rho; blend_percentage.green=geometry_info.rho; blend_percentage.blue=geometry_info.rho; blend_percentage.black=geometry_info.rho; blend_percentage.alpha=(MagickRealType) TransparentAlpha; if ((flags & SigmaValue) != 0) blend_percentage.green=geometry_info.sigma; if ((flags & XiValue) != 0) blend_percentage.blue=geometry_info.xi; if ((flags & PsiValue) != 0) blend_percentage.alpha=geometry_info.psi; if (blend_percentage.colorspace == CMYKColorspace) { if ((flags & PsiValue) != 0) blend_percentage.black=geometry_info.psi; if ((flags & ChiValue) != 0) blend_percentage.alpha=geometry_info.chi; } /* Colorize DirectClass image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(colorize_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(colorize_image,colorize_image,colorize_image->rows,1) #endif for (y=0; y < (ssize_t) colorize_image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) colorize_image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++) { PixelTrait traits=GetPixelChannelTraits(colorize_image, (PixelChannel) i); if (traits == UndefinedPixelTrait) continue; if (((traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(colorize_image,q) == 0)) continue; SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum( Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i), GetPixelInfoChannel(colorize,(PixelChannel) i))),q); } q+=GetPixelChannels(colorize_image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorizeImage) #endif proceed=SetImageProgress(image,ColorizeImageTag,progress++, colorize_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); if (status == MagickFalse) colorize_image=DestroyImage(colorize_image); return(colorize_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r M a t r i x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorMatrixImage() applies a color transformation to an image. This method % permits saturation changes, hue rotation, luminance to alpha, and various % other effects. Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the ColorMatrixImage method is: % % Image *ColorMatrixImage(const Image *image, % const KernelInfo *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color_matrix: the color matrix. % % o exception: return any errors or warnings in this structure. % */ /* FUTURE: modify to make use of a MagickMatrix Multiply function that should be provided in "matrix.c" (ASIDE: actually distorts should do this too but currently doesn't) */ MagickExport Image *ColorMatrixImage(const Image *image, const KernelInfo *color_matrix,ExceptionInfo *exception) { #define ColorMatrixImageTag "ColorMatrix/Image" CacheView *color_view, *image_view; double ColorMatrix[6][6] = { { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 } }; Image *color_image; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t u, v, y; /* Map the given color_matrix into a 6x6 RGBKA matrix plus a constant (offset) column. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); i=0; for (v=0; v < (ssize_t) color_matrix->height; v++) for (u=0; u < (ssize_t) color_matrix->width; u++) { if ((v < 6) && (u < 6)) ColorMatrix[v][u]=color_matrix->values[i]; i++; } /* Initialize color image. */ color_image=CloneImage(image,0,0,MagickTrue,exception); if (color_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse) { color_image=DestroyImage(color_image); return((Image *) NULL); } if (image->debug != MagickFalse) { char format[MagickPathExtent], *message; (void) LogMagickEvent(TransformEvent,GetMagickModule(), " ColorMatrix image with color matrix:"); message=AcquireString(""); for (v=0; v < 6; v++) { *message='\0'; (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v); (void) ConcatenateString(&message,format); for (u=0; u < 6; u++) { (void) FormatLocaleString(format,MagickPathExtent,"%+f ", ColorMatrix[v][u]); (void) ConcatenateString(&message,format); } (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message); } message=DestroyString(message); } /* Apply the ColorMatrix to image.
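  For every pixel, each output channel v below is the affine combination
  out[v] = M[v][0]*R + M[v][1]*G + M[v][2]*B + M[v][3]*K + M[v][4]*A +
  QuantumRange*M[v][5], where the K and A terms contribute only for CMYK
  and alpha-enabled images.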
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); color_view=AcquireAuthenticCacheView(color_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,color_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(image,&pixel); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t v; size_t height; GetPixelInfoPixel(image,p,&pixel); height=color_matrix->height > 6 ? 6UL : color_matrix->height; for (v=0; v < (ssize_t) height; v++) { double sum; sum=ColorMatrix[v][0]*GetPixelRed(image,p)+ColorMatrix[v][1]* GetPixelGreen(image,p)+ColorMatrix[v][2]*GetPixelBlue(image,p); if (image->colorspace == CMYKColorspace) sum+=ColorMatrix[v][3]*GetPixelBlack(image,p); if (image->alpha_trait != UndefinedPixelTrait) sum+=ColorMatrix[v][4]*GetPixelAlpha(image,p); sum+=QuantumRange*ColorMatrix[v][5]; switch (v) { case 0: pixel.red=sum; break; case 1: pixel.green=sum; break; case 2: pixel.blue=sum; break; case 3: pixel.black=sum; break; case 4: pixel.alpha=sum; break; default: break; } } SetPixelViaPixelInfo(color_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(color_image); } if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorMatrixImage) #endif proceed=SetImageProgress(image,ColorMatrixImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } color_view=DestroyCacheView(color_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) color_image=DestroyImage(color_image); return(color_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y F x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyFxInfo() deallocates memory associated with an FxInfo structure. % % The format of the DestroyFxInfo method is: % % FxInfo *DestroyFxInfo(FxInfo *fx_info) % % A description of each parameter follows: % % o fx_info: the fx info.
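%
% A hypothetical usage sketch, pairing DestroyFxInfo() with AcquireFxInfo()
% (image and exception are assumed to exist):
%
%   double alpha, value;
%   FxInfo *fx_info=AcquireFxInfo(image,"u/2",exception);
%   value=FxEvaluateChannelExpression(fx_info,RedPixelChannel,0,0,&alpha,
%     exception);
%   fx_info=DestroyFxInfo(fx_info);
%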
% */ MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info) { register ssize_t i; fx_info->exception=DestroyExceptionInfo(fx_info->exception); fx_info->expression=DestroyString(fx_info->expression); fx_info->symbols=DestroySplayTree(fx_info->symbols); fx_info->colors=DestroySplayTree(fx_info->colors); for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--) fx_info->view[i]=DestroyCacheView(fx_info->view[i]); fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view); fx_info->random_info=DestroyRandomInfo(fx_info->random_info); fx_info=(FxInfo *) RelinquishMagickMemory(fx_info); return(fx_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F x E v a l u a t e C h a n n e l E x p r e s s i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxEvaluateChannelExpression() evaluates an expression and returns the % results. % % The format of the FxEvaluateChannelExpression method is: % % double FxEvaluateChannelExpression(FxInfo *fx_info, % const PixelChannel channel,const ssize_t x,const ssize_t y, % double *alpha,ExceptionInfo *exception) % double FxEvaluateExpression(FxInfo *fx_info, % double *alpha,ExceptionInfo *exception) % % A description of each parameter follows: % % o fx_info: the fx info. % % o channel: the channel. % % o x,y: the pixel position. % % o alpha: the result. % % o exception: return any errors or warnings in this structure. % */ static double FxChannelStatistics(FxInfo *fx_info,Image *image, PixelChannel channel,const char *symbol,ExceptionInfo *exception) { ChannelType channel_mask; char key[MagickPathExtent], statistic[MagickPathExtent]; const char *value; register const char *p; channel_mask=UndefinedChannel; for (p=symbol; (*p != '.') && (*p != '\0'); p++) ; if (*p == '.') { ssize_t option; option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1); if (option >= 0) { channel=(PixelChannel) option; channel_mask=SetPixelChannelMask(image,(ChannelType) (1 << channel)); } } (void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image, (double) channel,symbol); value=(const char *) GetValueFromSplayTree(fx_info->symbols,key); if (value != (const char *) NULL) { if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); return(QuantumScale*StringToDouble(value,(char **) NULL)); } (void) DeleteNodeFromSplayTree(fx_info->symbols,key); *statistic='\0'; /* guard: not every symbol below fills in statistic */ if (LocaleNCompare(symbol,"depth",5) == 0) { size_t depth; depth=GetImageDepth(image,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%.20g",(double) depth); } if (LocaleNCompare(symbol,"kurtosis",8) == 0) { double kurtosis, skewness; (void) GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",kurtosis); } if (LocaleNCompare(symbol,"maxima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",maxima); } if (LocaleNCompare(symbol,"mean",4) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",mean); } if (LocaleNCompare(symbol,"minima",6) == 0) { double maxima, minima; (void) GetImageRange(image,&minima,&maxima,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",minima); } if (LocaleNCompare(symbol,"skewness",8) == 0) { double kurtosis, skewness; (void)
GetImageKurtosis(image,&kurtosis,&skewness,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g",skewness); } if (LocaleNCompare(symbol,"standard_deviation",18) == 0) { double mean, standard_deviation; (void) GetImageMean(image,&mean,&standard_deviation,exception); (void) FormatLocaleString(statistic,MagickPathExtent,"%g", standard_deviation); } if (channel_mask != UndefinedChannel) (void) SetPixelChannelMask(image,channel_mask); (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key), ConstantString(statistic)); return(QuantumScale*StringToDouble(statistic,(char **) NULL)); } static double FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t, const ssize_t,const char *,size_t *,double *,ExceptionInfo *); static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta) { if (beta != 0) return(FxGCD(beta,alpha % beta)); return(alpha); } static inline const char *FxSubexpression(const char *expression, ExceptionInfo *exception) { const char *subexpression; register ssize_t level; level=0; subexpression=expression; while ((*subexpression != '\0') && ((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL))) { if (strchr("(",(int) *subexpression) != (char *) NULL) level++; else if (strchr(")",(int) *subexpression) != (char *) NULL) level--; subexpression++; } if (*subexpression == '\0') (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnbalancedParenthesis","`%s'",expression); return(subexpression); } static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel, const ssize_t x,const ssize_t y,const char *expression, ExceptionInfo *exception) { char *q, subexpression[MagickPathExtent], symbol[MagickPathExtent]; const char *p, *value; Image *image; PixelInfo pixel; double alpha, beta; PointInfo point; register ssize_t i; size_t depth, length, level; p=expression; i=GetImageIndexInList(fx_info->images); depth=0; level=0; point.x=(double) x; point.y=(double) y; if (isalpha((int) ((unsigned char) *(p+1))) == 0) { if (strchr("suv",(int) *p) != (char *) NULL) { switch (*p) { case 's': default: { i=GetImageIndexInList(fx_info->images); break; } case 'u': i=0; break; case 'v': i=1; break; } p++; if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); i=(ssize_t) alpha; p++; } if (*p == '.') p++; } if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0)) { p++; if (*p == '{') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '{') level++; else if (*p == '}') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x=alpha; point.y=beta; p++; } else if (*p == '[') { level++; q=subexpression; for (p++; *p != '\0'; ) { if (*p == '[') level++; else if (*p == ']') { level--; if (level == 0) break; } *q++=(*p++); } *q='\0'; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression, &depth,&beta,exception); point.x+=alpha; point.y+=beta; p++; } if (*p == '.') p++; } } length=GetImageListLength(fx_info->images); while (i < 0) i+=(ssize_t) length; if (length != 0) i%=length; image=GetImageFromList(fx_info->images,i); if (image == (Image *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "NoSuchImage","`%s'",expression); return(0.0); } GetPixelInfo(image,&pixel); 
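/* Sample the selected image at the point computed above; the interpolation handles fractional coordinates such as those produced by p{x,y}. */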
(void) InterpolatePixelInfo(image,fx_info->view[i],image->interpolate, point.x,point.y,&pixel,exception); if ((strlen(p) > 2) && (LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) && (LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) && (LocaleCompare(p,"saturation") != 0) && (LocaleCompare(p,"lightness") != 0)) { char name[MagickPathExtent]; (void) CopyMagickString(name,p,MagickPathExtent); for (q=name+(strlen(name)-1); q > name; q--) { if (*q == ')') break; if (*q == '.') { *q='\0'; break; } } if ((strlen(name) > 2) && (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL)) { PixelInfo *color; color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name); if (color != (PixelInfo *) NULL) { pixel=(*color); p+=strlen(name); } else { MagickBooleanType status; status=QueryColorCompliance(name,AllCompliance,&pixel, fx_info->exception); if (status != MagickFalse) { (void) AddValueToSplayTree(fx_info->colors,ConstantString( name),ClonePixelInfo(&pixel)); p+=strlen(name); } } } } (void) CopyMagickString(symbol,p,MagickPathExtent); StripString(symbol); if (*symbol == '\0') { switch (channel) { case RedPixelChannel: return(QuantumScale*pixel.red); case GreenPixelChannel: return(QuantumScale*pixel.green); case BluePixelChannel: return(QuantumScale*pixel.blue); case BlackPixelChannel: { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), ImageError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } case AlphaPixelChannel: { if (pixel.alpha_trait == UndefinedPixelTrait) return(1.0); alpha=(double) (QuantumScale*pixel.alpha); return(alpha); } case IndexPixelChannel: return(0.0); case IntensityPixelChannel: { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } default: break; } (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",p); return(0.0); } switch (*symbol) { case 'A': case 'a': { if (LocaleCompare(symbol,"a") == 0) return((QuantumScale*pixel.alpha)); break; } case 'B': case 'b': { if (LocaleCompare(symbol,"b") == 0) return(QuantumScale*pixel.blue); break; } case 'C': case 'c': { if (LocaleNCompare(symbol,"channel",7) == 0) { GeometryInfo channel_info; MagickStatusType flags; flags=ParseGeometry(symbol+7,&channel_info); if (image->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case MagentaPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case YellowPixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } case AlphaPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } default: return(0.0); } switch (channel) { case RedPixelChannel: { if ((flags & RhoValue) == 0) return(0.0); return(channel_info.rho); } case GreenPixelChannel: { if ((flags & SigmaValue) == 0) return(0.0); return(channel_info.sigma); } case BluePixelChannel: { if ((flags & XiValue) == 0) return(0.0); return(channel_info.xi); } case BlackPixelChannel: { if ((flags & ChiValue) == 0) return(0.0); return(channel_info.chi); } case AlphaPixelChannel: { if ((flags & PsiValue) == 0) return(0.0); return(channel_info.psi); } default: return(0.0); } } 
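/* A bare "c" denotes cyan, which PixelInfo stores in its red component. */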
if (LocaleCompare(symbol,"c") == 0) return(QuantumScale*pixel.red); break; } case 'D': case 'd': { if (LocaleNCompare(symbol,"depth",5) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'G': case 'g': { if (LocaleCompare(symbol,"g") == 0) return(QuantumScale*pixel.green); break; } case 'K': case 'k': { if (LocaleNCompare(symbol,"kurtosis",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"k") == 0) { if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"ColorSeparatedImageRequired","`%s'", image->filename); return(0.0); } return(QuantumScale*pixel.black); } break; } case 'H': case 'h': { if (LocaleCompare(symbol,"h") == 0) return(image->rows); if (LocaleCompare(symbol,"hue") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(hue); } break; } case 'I': case 'i': { if ((LocaleCompare(symbol,"image.depth") == 0) || (LocaleCompare(symbol,"image.minima") == 0) || (LocaleCompare(symbol,"image.maxima") == 0) || (LocaleCompare(symbol,"image.mean") == 0) || (LocaleCompare(symbol,"image.kurtosis") == 0) || (LocaleCompare(symbol,"image.skewness") == 0) || (LocaleCompare(symbol,"image.standard_deviation") == 0)) return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception)); if (LocaleCompare(symbol,"image.resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"image.resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"intensity") == 0) { Quantum quantum_pixel[MaxPixelChannels]; SetPixelViaPixelInfo(image,&pixel,quantum_pixel); return(QuantumScale*GetPixelIntensity(image,quantum_pixel)); } if (LocaleCompare(symbol,"i") == 0) return(x); break; } case 'J': case 'j': { if (LocaleCompare(symbol,"j") == 0) return(y); break; } case 'L': case 'l': { if (LocaleCompare(symbol,"lightness") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(lightness); } if (LocaleCompare(symbol,"luma") == 0) { double luma; luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luma); } if (LocaleCompare(symbol,"luminance") == 0) { double luminance; luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue; return(QuantumScale*luminance); } break; } case 'M': case 'm': { if (LocaleNCompare(symbol,"maxima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"mean",4) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"minima",6) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleCompare(symbol,"m") == 0) return(QuantumScale*pixel.green); break; } case 'N': case 'n': { if (LocaleCompare(symbol,"n") == 0) return(GetImageListLength(fx_info->images)); break; } case 'O': case 'o': { if (LocaleCompare(symbol,"o") == 0) return(QuantumScale*pixel.alpha); break; } case 'P': case 'p': { if (LocaleCompare(symbol,"page.height") == 0) return(image->page.height); if (LocaleCompare(symbol,"page.width") == 0) return(image->page.width); if (LocaleCompare(symbol,"page.x") == 0) return(image->page.x); if (LocaleCompare(symbol,"page.y") == 0) return(image->page.y); break; } case 'Q': case 'q': { if (LocaleCompare(symbol,"quality") == 0) return(image->quality); break; } case 'R': case 'r': { if
(LocaleCompare(symbol,"resolution.x") == 0) return(image->resolution.x); if (LocaleCompare(symbol,"resolution.y") == 0) return(image->resolution.y); if (LocaleCompare(symbol,"r") == 0) return(QuantumScale*pixel.red); break; } case 'S': case 's': { if (LocaleCompare(symbol,"saturation") == 0) { double hue, lightness, saturation; ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation, &lightness); return(saturation); } if (LocaleNCompare(symbol,"skewness",8) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); if (LocaleNCompare(symbol,"standard_deviation",18) == 0) return(FxChannelStatistics(fx_info,image,channel,symbol,exception)); break; } case 'T': case 't': { if (LocaleCompare(symbol,"t") == 0) return(GetImageIndexInList(fx_info->images)); break; } case 'W': case 'w': { if (LocaleCompare(symbol,"w") == 0) return(image->columns); break; } case 'Y': case 'y': { if (LocaleCompare(symbol,"y") == 0) return(QuantumScale*pixel.blue); break; } case 'Z': case 'z': { if (LocaleCompare(symbol,"z") == 0) return((double)GetImageDepth(image, fx_info->exception)); break; } default: break; } value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol); if (value != (const char *) NULL) return(StringToDouble(value,(char **) NULL)); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "UnableToParseExpression","`%s'",symbol); return(0.0); } static const char *FxOperatorPrecedence(const char *expression, ExceptionInfo *exception) { typedef enum { UndefinedPrecedence, NullPrecedence, BitwiseComplementPrecedence, ExponentPrecedence, ExponentialNotationPrecedence, MultiplyPrecedence, AdditionPrecedence, ShiftPrecedence, RelationalPrecedence, EquivalencyPrecedence, BitwiseAndPrecedence, BitwiseOrPrecedence, LogicalAndPrecedence, LogicalOrPrecedence, TernaryPrecedence, AssignmentPrecedence, CommaPrecedence, SeparatorPrecedence } FxPrecedence; FxPrecedence precedence, target; register const char *subexpression; register int c; size_t level; c=0; level=0; subexpression=(const char *) NULL; target=NullPrecedence; while (*expression != '\0') { precedence=UndefinedPrecedence; if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@')) { expression++; continue; } switch (*expression) { case 'A': case 'a': { #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { expression+=5; break; } #endif #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { expression+=5; break; } #endif if (LocaleNCompare(expression,"atan2",5) == 0) { expression+=5; break; } break; } case 'E': case 'e': { if ((isdigit((int) ((unsigned char) c)) != 0) && ((LocaleNCompare(expression,"E+",2) == 0) || (LocaleNCompare(expression,"E-",2) == 0))) { expression+=2; /* scientific notation */ break; } } case 'J': case 'j': { if ((LocaleNCompare(expression,"j0",2) == 0) || (LocaleNCompare(expression,"j1",2) == 0)) { expression+=2; break; } break; } case '#': { while (isxdigit((int) ((unsigned char) *(expression+1))) != 0) expression++; break; } default: break; } if ((c == (int) '{') || (c == (int) '[')) level++; else if ((c == (int) '}') || (c == (int) ']')) level--; if (level == 0) switch ((unsigned char) *expression) { case '~': case '!': { precedence=BitwiseComplementPrecedence; break; } case '^': case '@': { precedence=ExponentPrecedence; break; } default: { if (((c != 0) && ((isdigit((int) ((unsigned char) 
c)) != 0) || (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) && (((islower((int) ((unsigned char) *expression)) != 0) || (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) || ((isdigit((int) ((unsigned char) c)) == 0) && (isdigit((int) ((unsigned char) *expression)) != 0))) && (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL)) precedence=MultiplyPrecedence; break; } case '*': case '/': case '%': { precedence=MultiplyPrecedence; break; } case '+': case '-': { if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) || (isalpha(c) != 0)) precedence=AdditionPrecedence; break; } case LeftShiftOperator: case RightShiftOperator: { precedence=ShiftPrecedence; break; } case '<': case LessThanEqualOperator: case GreaterThanEqualOperator: case '>': { precedence=RelationalPrecedence; break; } case EqualOperator: case NotEqualOperator: { precedence=EquivalencyPrecedence; break; } case '&': { precedence=BitwiseAndPrecedence; break; } case '|': { precedence=BitwiseOrPrecedence; break; } case LogicalAndOperator: { precedence=LogicalAndPrecedence; break; } case LogicalOrOperator: { precedence=LogicalOrPrecedence; break; } case ExponentialNotation: { precedence=ExponentialNotationPrecedence; break; } case ':': case '?': { precedence=TernaryPrecedence; break; } case '=': { precedence=AssignmentPrecedence; break; } case ',': { precedence=CommaPrecedence; break; } case ';': { precedence=SeparatorPrecedence; break; } } if ((precedence == BitwiseComplementPrecedence) || (precedence == TernaryPrecedence) || (precedence == AssignmentPrecedence)) { if (precedence > target) { /* Right-to-left associativity. */ target=precedence; subexpression=expression; } } else if (precedence >= target) { /* Left-to-right associativity. */ target=precedence; subexpression=expression; } if (strchr("(",(int) *expression) != (char *) NULL) expression=FxSubexpression(expression,exception); c=(int) (*expression++); } return(subexpression); } static double FxEvaluateSubexpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, const char *expression,size_t *depth,double *beta,ExceptionInfo *exception) { #define FxMaxParenthesisDepth 58 char *q, subexpression[MagickPathExtent]; double alpha, gamma; register const char *p; *beta=0.0; if (exception->severity >= ErrorException) return(0.0); while (isspace((int) ((unsigned char) *expression)) != 0) expression++; if (*expression == '\0') return(0.0); *subexpression='\0'; p=FxOperatorPrecedence(expression,exception); if (p != (const char *) NULL) { (void) CopyMagickString(subexpression,expression,(size_t) (p-expression+1)); alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth, beta,exception); switch ((unsigned char) *p) { case '~': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) (~(size_t) *beta); return(*beta); } case '!': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(*beta == 0.0 ? 
1.0 : 0.0); } case '^': { *beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth, beta,exception)); return(*beta); } case '*': case ExponentialNotation: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha*(*beta)); } case '/': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(alpha/(*beta)); } case '%': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=fabs(floor((*beta)+0.5)); if (*beta == 0.0) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"DivideByZero","`%s'",expression); return(0.0); } return(fmod(alpha,*beta)); } case '+': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha+(*beta)); } case '-': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha-(*beta)); } case LeftShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5)); return(*beta); } case RightShiftOperator: { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5)); return(*beta); } case '<': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha < *beta ? 1.0 : 0.0); } case LessThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha <= *beta ? 1.0 : 0.0); } case '>': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha > *beta ? 1.0 : 0.0); } case GreaterThanEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha >= *beta ? 1.0 : 0.0); } case EqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0); } case NotEqualOperator: { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0); } case '&': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5)); return(*beta); } case '|': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); *beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5)); return(*beta); } case LogicalAndOperator: { p++; if (alpha <= 0.0) { *beta=0.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 1.0 : 0.0; return(*beta); } case LogicalOrOperator: { p++; if (alpha > 0.0) { *beta=1.0; return(*beta); } gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); *beta=(gamma > 0.0) ? 
1.0 : 0.0; return(*beta); } case '?': { (void) CopyMagickString(subexpression,++p,MagickPathExtent); q=subexpression; p=StringToken(":",&q); if (q == (char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } if (fabs(alpha) >= MagickEpsilon) gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); else gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta, exception); return(gamma); } case '=': { char numeric[MagickPathExtent]; q=subexpression; while (isalpha((int) ((unsigned char) *q)) != 0) q++; if (*q != '\0') { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"UnableToParseExpression","`%s'",subexpression); return(0.0); } ClearMagickException(exception); *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); (void) FormatLocaleString(numeric,MagickPathExtent,"%g",*beta); (void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression); (void) AddValueToSplayTree(fx_info->symbols,ConstantString( subexpression),ConstantString(numeric)); return(*beta); } case ',': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(alpha); } case ';': { *beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta, exception); return(*beta); } default: { gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta, exception); return(gamma); } } } if (strchr("(",(int) *expression) != (char *) NULL) { (*depth)++; if (*depth >= FxMaxParenthesisDepth) (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "ParenthesisNestedTooDeeply","`%s'",expression); (void) CopyMagickString(subexpression,expression+1,MagickPathExtent); subexpression[strlen(subexpression)-1]='\0'; gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth, beta,exception); (*depth)--; return(gamma); } switch (*expression) { case '+': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(1.0*gamma); } case '-': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return(-1.0*gamma); } case '~': { gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta, exception); return((~(size_t) (gamma+0.5))); } case 'A': case 'a': { if (LocaleNCompare(expression,"abs",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(fabs(alpha)); } #if defined(MAGICKCORE_HAVE_ACOSH) if (LocaleNCompare(expression,"acosh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(acosh(alpha)); } #endif if (LocaleNCompare(expression,"acos",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(acos(alpha)); } #if defined(MAGICKCORE_HAVE_J1) if (LocaleNCompare(expression,"airy",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0.0) return(1.0); gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha); return(gamma*gamma); } #endif #if defined(MAGICKCORE_HAVE_ASINH) if (LocaleNCompare(expression,"asinh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(asinh(alpha)); } #endif if (LocaleNCompare(expression,"asin",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(asin(alpha)); } if (LocaleNCompare(expression,"alt",3) == 0) { 
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0); } if (LocaleNCompare(expression,"atan2",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atan2(alpha,*beta)); } #if defined(MAGICKCORE_HAVE_ATANH) if (LocaleNCompare(expression,"atanh",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(atanh(alpha)); } #endif if (LocaleNCompare(expression,"atan",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(atan(alpha)); } if (LocaleCompare(expression,"a") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'B': case 'b': { if (LocaleCompare(expression,"b") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'C': case 'c': { if (LocaleNCompare(expression,"ceil",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(ceil(alpha)); } if (LocaleNCompare(expression,"clamp",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha < 0.0) return(0.0); if (alpha > 1.0) return(1.0); return(alpha); } if (LocaleNCompare(expression,"cosh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(cosh(alpha)); } if (LocaleNCompare(expression,"cos",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(cos(alpha)); } if (LocaleCompare(expression,"c") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'D': case 'd': { if (LocaleNCompare(expression,"debug",5) == 0) { const char *type; alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (fx_info->images->colorspace == CMYKColorspace) switch (channel) { case CyanPixelChannel: type="cyan"; break; case MagentaPixelChannel: type="magenta"; break; case YellowPixelChannel: type="yellow"; break; case AlphaPixelChannel: type="opacity"; break; case BlackPixelChannel: type="black"; break; default: type="unknown"; break; } else switch (channel) { case RedPixelChannel: type="red"; break; case GreenPixelChannel: type="green"; break; case BluePixelChannel: type="blue"; break; case AlphaPixelChannel: type="opacity"; break; default: type="unknown"; break; } (void) CopyMagickString(subexpression,expression+6,MagickPathExtent); if (strlen(subexpression) > 1) subexpression[strlen(subexpression)-1]='\0'; if (fx_info->file != (FILE *) NULL) (void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: " "%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type, subexpression,GetMagickPrecision(),alpha); return(0.0); } if (LocaleNCompare(expression,"drc",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((alpha/(*beta*(alpha-1.0)+1.0))); } break; } case 'E': case 'e': { if (LocaleCompare(expression,"epsilon") == 0) return(MagickEpsilon); #if defined(MAGICKCORE_HAVE_ERF) if (LocaleNCompare(expression,"erf",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(erf(alpha)); } #endif if (LocaleNCompare(expression,"exp",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(exp(alpha)); } if (LocaleCompare(expression,"e") == 0) return(2.7182818284590452354); break; } case 
'F': case 'f':
    {
      if (LocaleNCompare(expression,"floor",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(floor(alpha));
        }
      break;
    }
    case 'G': case 'g':
    {
      if (LocaleNCompare(expression,"gauss",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          gamma=exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
          return(gamma);
        }
      if (LocaleNCompare(expression,"gcd",3) == 0)
        {
          MagickOffsetType
            gcd;

          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
            0.5));
          return(gcd);
        }
      if (LocaleCompare(expression,"g") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'H': case 'h':
    {
      if (LocaleCompare(expression,"h") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleCompare(expression,"hue") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"hypot",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(hypot(alpha,*beta));
        }
      break;
    }
    case 'K': case 'k':
    {
      if (LocaleCompare(expression,"k") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'I': case 'i':
    {
      if (LocaleCompare(expression,"intensity") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      if (LocaleNCompare(expression,"int",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(floor(alpha));
        }
      if (LocaleNCompare(expression,"isnan",5) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
            beta,exception);
          return(!!IsNaN(alpha));
        }
      if (LocaleCompare(expression,"i") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'J': case 'j':
    {
      if (LocaleCompare(expression,"j") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
      if (LocaleNCompare(expression,"j0",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j0(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"j1",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(j1(alpha));
        }
#endif
#if defined(MAGICKCORE_HAVE_J1)
      if (LocaleNCompare(expression,"jinc",4) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
            beta,exception);
          if (alpha == 0.0)
            return(1.0);
          gamma=(2.0*j1((MagickPI*alpha))/(MagickPI*alpha));
          return(gamma);
        }
#endif
      break;
    }
    case 'L': case 'l':
    {
      if (LocaleNCompare(expression,"ln",2) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
            beta,exception);
          return(log(alpha));
        }
      if (LocaleNCompare(expression,"logtwo",6) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
            beta,exception);
          return(log10(alpha)/log10(2.0));
        }
      if (LocaleNCompare(expression,"log",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
            beta,exception);
          return(log10(alpha));
        }
      if (LocaleCompare(expression,"lightness") == 0)
        return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
      break;
    }
    case 'M': case 'm':
    {
      if (LocaleCompare(expression,"MaxRGB") == 0)
        return(QuantumRange);
      if (LocaleNCompare(expression,"maxima",6) == 0)
        break;
      if (LocaleNCompare(expression,"max",3) == 0)
        {
          alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception); return(alpha > *beta ? alpha : *beta); } if (LocaleNCompare(expression,"minima",6) == 0) break; if (LocaleNCompare(expression,"min",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(alpha < *beta ? alpha : *beta); } if (LocaleNCompare(expression,"mod",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); gamma=alpha-floor((alpha/(*beta)))*(*beta); return(gamma); } if (LocaleCompare(expression,"m") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'N': case 'n': { if (LocaleNCompare(expression,"not",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return((alpha < MagickEpsilon)); } if (LocaleCompare(expression,"n") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'O': case 'o': { if (LocaleCompare(expression,"Opaque") == 0) return(1.0); if (LocaleCompare(expression,"o") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'P': case 'p': { if (LocaleCompare(expression,"phi") == 0) return(MagickPHI); if (LocaleCompare(expression,"pi") == 0) return(MagickPI); if (LocaleNCompare(expression,"pow",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(pow(alpha,*beta)); } if (LocaleCompare(expression,"p") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Q': case 'q': { if (LocaleCompare(expression,"QuantumRange") == 0) return(QuantumRange); if (LocaleCompare(expression,"QuantumScale") == 0) return(QuantumScale); break; } case 'R': case 'r': { if (LocaleNCompare(expression,"rand",4) == 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FxEvaluateSubexpression) #endif alpha=GetPseudoRandomValue(fx_info->random_info); return(alpha); } if (LocaleNCompare(expression,"round",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); return(floor(alpha+0.5)); } if (LocaleCompare(expression,"r") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'S': case 's': { if (LocaleCompare(expression,"saturation") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); if (LocaleNCompare(expression,"sign",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(alpha < 0.0 ? 
-1.0 : 1.0); } if (LocaleNCompare(expression,"sinc",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); if (alpha == 0) return(1.0); gamma=sin((MagickPI*alpha))/(MagickPI*alpha); return(gamma); } if (LocaleNCompare(expression,"sinh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sinh(alpha)); } if (LocaleNCompare(expression,"sin",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(sin(alpha)); } if (LocaleNCompare(expression,"sqrt",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(sqrt(alpha)); } if (LocaleNCompare(expression,"squish",6) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth, beta,exception); return((1.0/(1.0+exp(-alpha)))); } if (LocaleCompare(expression,"s") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'T': case 't': { if (LocaleNCompare(expression,"tanh",4) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception); return(tanh(alpha)); } if (LocaleNCompare(expression,"tan",3) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth, beta,exception); return(tan(alpha)); } if (LocaleCompare(expression,"Transparent") == 0) return(0.0); if (LocaleNCompare(expression,"trunc",5) == 0) { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth, beta,exception); if (alpha >= 0.0) return(floor(alpha)); return(ceil(alpha)); } if (LocaleCompare(expression,"t") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'U': case 'u': { if (LocaleCompare(expression,"u") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'V': case 'v': { if (LocaleCompare(expression,"v") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'W': case 'w': { if (LocaleNCompare(expression,"while",5) == 0) { do { alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5, depth,beta,exception); } while (fabs(alpha) >= MagickEpsilon); return(*beta); } if (LocaleCompare(expression,"w") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Y': case 'y': { if (LocaleCompare(expression,"y") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } case 'Z': case 'z': { if (LocaleCompare(expression,"z") == 0) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); break; } default: break; } q=(char *) expression; alpha=InterpretSiPrefixValue(expression,&q); if (q == expression) return(FxGetSymbol(fx_info,channel,x,y,expression,exception)); return(alpha); } MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { MagickBooleanType status; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); return(status); } MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info, double *alpha,ExceptionInfo *exception) { FILE *file; MagickBooleanType status; file=fx_info->file; fx_info->file=(FILE *) NULL; status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha, exception); fx_info->file=file; return(status); } MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info, const PixelChannel channel,const ssize_t x,const ssize_t y, double *alpha,ExceptionInfo *exception) { double beta; size_t depth; depth=0; beta=0.0; 
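  /*
    depth guards FxEvaluateSubexpression() against runaway recursion on
    nested parentheses; beta receives the right-hand operand of two-argument
    functions such as atan2(), pow(), and hypot().
  */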
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,&depth, &beta,exception); return(exception->severity == OptionError ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F x I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FxImage() applies a mathematical expression to the specified image. % % The format of the FxImage method is: % % Image *FxImage(const Image *image,const char *expression, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o expression: A mathematical expression. % % o exception: return any errors or warnings in this structure. % */ static FxInfo **DestroyFxThreadSet(FxInfo **fx_info) { register ssize_t i; assert(fx_info != (FxInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (fx_info[i] != (FxInfo *) NULL) fx_info[i]=DestroyFxInfo(fx_info[i]); fx_info=(FxInfo **) RelinquishMagickMemory(fx_info); return(fx_info); } static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression, ExceptionInfo *exception) { char *fx_expression; FxInfo **fx_info; double alpha; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info)); if (fx_info == (FxInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); return((FxInfo **) NULL); } (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info)); if (*expression != '@') fx_expression=ConstantString(expression); else fx_expression=FileToString(expression+1,~0UL,exception); for (i=0; i < (ssize_t) number_threads; i++) { MagickBooleanType status; fx_info[i]=AcquireFxInfo(image,fx_expression,exception); if (fx_info[i] == (FxInfo *) NULL) break; status=FxPreprocessExpression(fx_info[i],&alpha,exception); if (status == MagickFalse) break; } fx_expression=DestroyString(fx_expression); if (i < (ssize_t) number_threads) fx_info=DestroyFxThreadSet(fx_info); return(fx_info); } MagickExport Image *FxImage(const Image *image,const char *expression, ExceptionInfo *exception) { #define FxImageTag "Fx/Image" CacheView *fx_view, *image_view; FxInfo **magick_restrict fx_info; Image *fx_image; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); fx_info=AcquireFxThreadSet(image,expression,exception); if (fx_info == (FxInfo **) NULL) return((Image *) NULL); fx_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (fx_image == (Image *) NULL) { fx_info=DestroyFxThreadSet(fx_info); return((Image *) NULL); } if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) { fx_info=DestroyFxThreadSet(fx_info); fx_image=DestroyImage(fx_image); return((Image *) NULL); } /* Fx image. 
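    Each worker thread takes its own FxInfo from the thread set, rows are
    processed in parallel, and every channel value is computed as
    ClampToQuantum(QuantumRange*alpha) from the expression result alpha
    (nominally in the range [0,1]).  As an illustrative sketch only, not
    part of this source, a caller might halve every channel of an image,
    where "u" denotes the input pixel, with:

      Image *half = FxImage(image,"u/2",exception);
      if (half == (Image *) NULL)
        ... the expression failed to parse or memory was exhausted ...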
*/
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha;

        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;
        if (((fx_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p) == 0))
          {
            SetPixelChannel(fx_image,channel,p[i],q);
            continue;
          }
        alpha=0.0;
        (void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
          exception);
        q[i]=ClampToQuantum(QuantumRange*alpha);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(fx_image);
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImage)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  image_view=DestroyCacheView(image_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I m p l o d e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ImplodeImage() creates a new image that is a copy of an existing one with
%  the image pixels "imploded" by the specified percentage.  It allocates the
%  memory necessary for the new Image structure and returns a pointer to the
%  new image.
%
%  The format of the ImplodeImage method is:
%
%      Image *ImplodeImage(const Image *image,const double amount,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o implode_image: Method ImplodeImage returns a pointer to the image
%      after it is imploded.  A null image is returned if there is a memory
%      shortage.
%
%    o image: the image.
%
%    o amount: Define the extent of the implosion.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *image_view,
    *implode_view,
    *interpolate_view;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    radius;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
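    The clone inherits BlendPixelTrait when the background color is not
    fully opaque so edge interpolation preserves transparency.  The scale
    and radius computed below measure distance in a square coordinate
    system: the shorter image axis is stretched to match the longer one so
    the implosion stays circular on non-square images, and radius is half
    the side of that scaled square.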
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); implode_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (implode_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse) { implode_image=DestroyImage(implode_image); return((Image *) NULL); } if (implode_image->background_color.alpha != OpaqueAlpha) implode_image->alpha_trait=BlendPixelTrait; /* Compute scaling factor. */ scale.x=1.0; scale.y=1.0; center.x=0.5*image->columns; center.y=0.5*image->rows; radius=center.x; if (image->columns > image->rows) scale.y=(double) image->columns/(double) image->rows; else if (image->columns < image->rows) { scale.x=(double) image->rows/(double) image->columns; radius=center.y; } /* Implode image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); interpolate_view=AcquireVirtualCacheView(image,exception); implode_view=AcquireAuthenticCacheView(implode_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,implode_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(implode_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(implode_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait implode_traits=GetPixelChannelTraits(implode_image, channel); if ((traits == UndefinedPixelTrait) || (implode_traits == UndefinedPixelTrait)) continue; SetPixelChannel(implode_image,channel,p[i],q); } else { double factor; /* Implode the pixel. 
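    The sampling factor below is sin(MagickPI*sqrt(distance)/radius/2)
    raised to -amount, applied to the pixel's scaled offset from the
    center.  Worked example: with amount=1.0 a pixel halfway to the rim
    has factor 1/sin(pi/4), about 1.41, so it samples from 1.41 times its
    offset and the picture appears drawn toward the center; a negative
    amount inverts the effect into an explosion.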
*/
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)/radius/2),
              -amount);
          status=InterpolatePixelChannels(image,interpolate_view,implode_image,
            method,(double) (factor*delta.x/scale.x+center.x),(double) (factor*
            delta.y/scale.y+center.y),q,exception);
        }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  The MorphImages() method requires a minimum of two images.  The first
%  image is transformed into the second by a number of intervening images
%  as specified by frames.
%
%  The format of the MorphImages method is:
%
%      Image *MorphImages(const Image *image,const size_t number_frames,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_frames:  Define the number of in-between images to generate.
%      The more in-between frames, the smoother the morph.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
  ExceptionInfo *exception)
{
#define MorphImageTag  "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    n;

  ssize_t
    y;

  /*
    Clone first frame in sequence.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image.
      */
      for (n=1; n < (ssize_t) number_frames; n++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
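    For every adjacent pair in the list the loop below emits number_frames
    intermediate frames.  Frame n is blended with
    beta=(n+1)/(number_frames+1) and alpha=1.0-beta, so number_frames=3
    yields weights of 0.25, 0.5 and 0.75 on the later image; the frame
    dimensions are interpolated with the same weights before the
    per-channel blend.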
*/ status=MagickTrue; scene=0; next=image; for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next)) { for (n=0; n < (ssize_t) number_frames; n++) { CacheView *image_view, *morph_view; beta=(double) (n+1.0)/(double) (number_frames+1.0); alpha=1.0-beta; morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta* GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta* GetNextImageInList(next)->rows+0.5),next->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } status=SetImageStorageClass(morph_image,DirectClass,exception); if (status == MagickFalse) { morph_image=DestroyImage(morph_image); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns, morph_images->rows,GetNextImageInList(next)->filter,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } image_view=AcquireVirtualCacheView(morph_image,exception); morph_view=AcquireAuthenticCacheView(morph_images,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(morph_image,morph_image,morph_image->rows,1) #endif for (y=0; y < (ssize_t) morph_images->rows; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1, exception); q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) morph_images->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++) { PixelChannel channel=GetPixelChannelChannel(morph_image,i); PixelTrait traits=GetPixelChannelTraits(morph_image,channel); PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel); if ((traits == UndefinedPixelTrait) || (morph_traits == UndefinedPixelTrait)) continue; if (((morph_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(morph_images,p) == 0)) { SetPixelChannel(morph_image,channel,p[i],q); continue; } SetPixelChannel(morph_image,channel,ClampToQuantum(alpha* GetPixelChannel(morph_images,channel,q)+beta*p[i]),q); } p+=GetPixelChannels(morph_image); q+=GetPixelChannels(morph_images); } sync=SyncCacheViewAuthenticPixels(morph_view,exception); if (sync == MagickFalse) status=MagickFalse; } morph_view=DestroyCacheView(morph_view); image_view=DestroyCacheView(image_view); morph_image=DestroyImage(morph_image); } if (n < (ssize_t) number_frames) break; /* Clone last frame in sequence. 
*/ morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception); if (morph_image == (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } AppendImageToList(&morph_images,morph_image); morph_images=GetLastImageInList(morph_images); if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphImages) #endif proceed=SetImageProgress(image,MorphImageTag,scene, GetImageListLength(image)); if (proceed == MagickFalse) status=MagickFalse; } scene++; } if (GetNextImageInList(next) != (Image *) NULL) { morph_images=DestroyImageList(morph_images); return((Image *) NULL); } return(GetFirstImageInList(morph_images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P l a s m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PlasmaImage() initializes an image with plasma fractal values. The image % must be initialized with a base color and the random number generator % seeded before this method is called. % % The format of the PlasmaImage method is: % % MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment, % size_t attenuate,size_t depth,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o segment: Define the region to apply plasma fractals values. % % o attenuate: Define the plasma attenuation factor. % % o depth: Limit the plasma recursion depth. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum PlasmaPixel(RandomInfo *random_info, const double pixel,const double noise) { Quantum plasma; plasma=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)- noise/2.0); if (plasma <= 0) return((Quantum) 0); if (plasma >= QuantumRange) return(QuantumRange); return(plasma); } static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view, CacheView *u_view,CacheView *v_view,RandomInfo *random_info, const SegmentInfo *segment,size_t attenuate,size_t depth, ExceptionInfo *exception) { double plasma; register const Quantum *magick_restrict u, *magick_restrict v; register Quantum *magick_restrict q; register ssize_t i; ssize_t x, x_mid, y, y_mid; if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) && (fabs(segment->y2-segment->y1) <= MagickEpsilon)) return(MagickTrue); if (depth != 0) { MagickBooleanType status; SegmentInfo local_info; /* Divide the area into quadrants and recurse. 
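    Each level splits the segment at its rounded center,
    ceil((x1+x2)/2-0.5), and recurses into the four quadrants with depth-1
    and attenuate+1; pixels are only perturbed once depth reaches zero,
    where the noise amplitude QuantumRange/(2.0*attenuate) shrinks with
    every level of recursion.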
*/ depth--; attenuate++; x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); local_info=(*segment); local_info.x2=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.y1=(double) y_mid; local_info.x2=(double) x_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y2=(double) y_mid; (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); local_info=(*segment); local_info.x1=(double) x_mid; local_info.y1=(double) y_mid; status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info, &local_info,attenuate,depth,exception); return(status); } x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5); y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5); if ((fabs(segment->x1-x_mid) < MagickEpsilon) && (fabs(segment->x2-x_mid) < MagickEpsilon) && (fabs(segment->y1-y_mid) < MagickEpsilon) && (fabs(segment->y2-y_mid) < MagickEpsilon)) return(MagickFalse); /* Average pixels and apply plasma. */ plasma=(double) QuantumRange/(2.0*attenuate); if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->x2-x_mid) > MagickEpsilon)) { /* Left pixel. */ x=(ssize_t) ceil(segment->x1-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5),1,1, exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5),1,1, exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); if (fabs(segment->x1-segment->x2) > MagickEpsilon) { /* Right pixel. */ x=(ssize_t) ceil(segment->x2-0.5); u=GetCacheViewVirtualPixels(u_view,x,(ssize_t) ceil(segment->y1-0.5), 1,1,exception); v=GetCacheViewVirtualPixels(v_view,x,(ssize_t) ceil(segment->y2-0.5), 1,1,exception); q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception); if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) || (q == (Quantum *) NULL)) return(MagickTrue); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma); } (void) SyncCacheViewAuthenticPixels(image_view,exception); } } if ((fabs(segment->y1-y_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { if ((fabs(segment->x1-x_mid) > MagickEpsilon) || (fabs(segment->y2-y_mid) > MagickEpsilon)) { /* Bottom pixel. 
*/
        y=(ssize_t) ceil(segment->y2-0.5);
        u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
          1,1,exception);
        v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
          1,1,exception);
        q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
        if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
            (q == (Quantum *) NULL))
          return(MagickTrue);
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          if (traits == UndefinedPixelTrait)
            continue;
          q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
        }
        (void) SyncCacheViewAuthenticPixels(image_view,exception);
      }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          u=GetCacheViewVirtualPixels(u_view,(ssize_t) ceil(segment->x1-0.5),y,
            1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,(ssize_t) ceil(segment->x2-0.5),y,
            1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,i);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
          }
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      /*
        Middle pixel.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,(u[i]+v[i])/2.0,plasma);
      }
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}

MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P o l a r o i d I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PolaroidImage() simulates a Polaroid picture.
%
%  The format of the PolaroidImage method is:
%
%      Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%        const char *caption,const double angle,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o caption: the Polaroid caption.
%
%    o angle: Apply the effect along this angle.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        geometry[MagickPathExtent],
        *text;

      DrawInfo
        *annotate_info;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image.
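        FormatMagickCaption() word-wraps the caption to the image width and
        returns the line count; the caption image is then sized to (count+1)
        rows of height ascent-descent and annotated at "+0+ascent" so the
        first baseline falls inside the image.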
*/ caption_image=CloneImage(image,image->columns,1,MagickTrue,exception); if (caption_image == (Image *) NULL) return((Image *) NULL); image_info=AcquireImageInfo(); annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info); text=InterpretImageProperties(image_info,(Image *) image,caption, exception); image_info=DestroyImageInfo(image_info); (void) CloneString(&annotate_info->text,text); count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics, &text,exception); status=SetImageExtent(caption_image,image->columns,(size_t) ((count+1)* (metrics.ascent-metrics.descent)+0.5),exception); if (status == MagickFalse) caption_image=DestroyImage(caption_image); else { caption_image->background_color=image->border_color; (void) SetImageBackgroundColor(caption_image,exception); (void) CloneString(&annotate_info->text,text); (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%g", metrics.ascent); if (annotate_info->gravity == UndefinedGravity) (void) CloneString(&annotate_info->geometry,AcquireString( geometry)); (void) AnnotateImage(caption_image,annotate_info,exception); height+=caption_image->rows; } annotate_info=DestroyDrawInfo(annotate_info); text=DestroyString(text); } picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue, exception); if (picture_image == (Image *) NULL) { if (caption_image != (Image *) NULL) caption_image=DestroyImage(caption_image); return((Image *) NULL); } picture_image->background_color=image->border_color; (void) SetImageBackgroundColor(picture_image,exception); (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum, quantum,exception); if (caption_image != (Image *) NULL) { (void) CompositeImage(picture_image,caption_image,OverCompositeOp, MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception); caption_image=DestroyImage(caption_image); } (void) QueryColorCompliance("none",AllCompliance, &picture_image->background_color,exception); (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception); rotate_image=RotateImage(picture_image,90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0* picture_image->columns,method,exception); picture_image=DestroyImage(picture_image); if (bend_image == (Image *) NULL) return((Image *) NULL); picture_image=bend_image; rotate_image=RotateImage(picture_image,-90.0,exception); picture_image=DestroyImage(picture_image); if (rotate_image == (Image *) NULL) return((Image *) NULL); picture_image=rotate_image; picture_image->background_color=image->background_color; polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3, exception); if (polaroid_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } flop_image=FlopImage(polaroid_image,exception); polaroid_image=DestroyImage(polaroid_image); if (flop_image == (Image *) NULL) { picture_image=DestroyImage(picture_image); return(picture_image); } polaroid_image=flop_image; (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp, MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception); picture_image=DestroyImage(picture_image); (void) QueryColorCompliance("none",AllCompliance, &polaroid_image->background_color,exception); rotate_image=RotateImage(polaroid_image,angle,exception); polaroid_image=DestroyImage(polaroid_image); if (rotate_image == (Image *) NULL) return((Image *) 
NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e p i a T o n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
%  effect achieved in a photo darkroom by sepia toning.  Threshold ranges
%  from 0 to QuantumRange and is a measure of the extent of the sepia toning.
%  A threshold of 80% is a good starting point for a reasonable tone.
%
%  The format of the SepiaToneImage method is:
%
%      Image *SepiaToneImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: the tone threshold.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      tone=intensity < (threshold/6.0) ?
0 : intensity-threshold/6.0; SetPixelBlue(sepia_image,ClampToQuantum(tone),q); tone=threshold/7.0; if ((double) GetPixelGreen(image,q) < tone) SetPixelGreen(sepia_image,ClampToQuantum(tone),q); if ((double) GetPixelBlue(image,q) < tone) SetPixelBlue(sepia_image,ClampToQuantum(tone),q); SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(sepia_image); } if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SepiaToneImage) #endif proceed=SetImageProgress(image,SepiaToneImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sepia_view=DestroyCacheView(sepia_view); image_view=DestroyCacheView(image_view); (void) NormalizeImage(sepia_image,exception); (void) ContrastImage(sepia_image,MagickTrue,exception); if (status == MagickFalse) sepia_image=DestroyImage(sepia_image); return(sepia_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a d o w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShadowImage() simulates a shadow from the specified image and returns it. % % The format of the ShadowImage method is: % % Image *ShadowImage(const Image *image,const double alpha, % const double sigma,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: percentage transparency. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x_offset: the shadow x-offset. % % o y_offset: the shadow y-offset. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ShadowImage(const Image *image,const double alpha, const double sigma,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define ShadowImageTag "Shadow/Image" CacheView *image_view; ChannelType channel_mask; Image *border_image, *clone_image, *shadow_image; MagickBooleanType status; PixelInfo background_color; RectangleInfo border_info; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); clone_image=CloneImage(image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return((Image *) NULL); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) SetImageColorspace(clone_image,sRGBColorspace,exception); (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod, exception); border_info.width=(size_t) floor(2.0*sigma+0.5); border_info.height=(size_t) floor(2.0*sigma+0.5); border_info.x=0; border_info.y=0; (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color, exception); clone_image->alpha_trait=BlendPixelTrait; border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception); clone_image=DestroyImage(clone_image); if (border_image == (Image *) NULL) return((Image *) NULL); if (border_image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception); /* Shadow image. 
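    Every pixel of the bordered clone is replaced below by the background
    color, with its alpha scaled to alpha percent of the pixel's original
    alpha (alpha=80.0 keeps 80% opacity).  The alpha channel alone is then
    Gaussian-blurred through the channel mask, and the page offsets are
    shifted by x_offset and y_offset less the blur border.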
*/ status=MagickTrue; background_color=border_image->background_color; background_color.alpha_trait=BlendPixelTrait; image_view=AcquireAuthenticCacheView(border_image,exception); for (y=0; y < (ssize_t) border_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) border_image->columns; x++) { if (border_image->alpha_trait != UndefinedPixelTrait) background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0; SetPixelViaPixelInfo(border_image,&background_color,q); q+=GetPixelChannels(border_image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) { border_image=DestroyImage(border_image); return((Image *) NULL); } channel_mask=SetImageChannelMask(border_image,AlphaChannel); shadow_image=BlurImage(border_image,0.0,sigma,exception); border_image=DestroyImage(border_image); if (shadow_image == (Image *) NULL) return((Image *) NULL); (void) SetPixelChannelMask(shadow_image,channel_mask); if (shadow_image->page.width == 0) shadow_image->page.width=shadow_image->columns; if (shadow_image->page.height == 0) shadow_image->page.height=shadow_image->rows; shadow_image->page.width+=x_offset-(ssize_t) border_info.width; shadow_image->page.height+=y_offset-(ssize_t) border_info.height; shadow_image->page.x+=x_offset-(ssize_t) border_info.width; shadow_image->page.y+=y_offset-(ssize_t) border_info.height; return(shadow_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S k e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SketchImage() simulates a pencil sketch. We convolve the image with a % Gaussian operator of the given radius and standard deviation (sigma). For % reasonable results, radius should be larger than sigma. Use a radius of 0 % and SketchImage() selects a suitable radius for you. Angle gives the angle % of the sketch. % % The format of the SketchImage method is: % % Image *SketchImage(const Image *image,const double radius, % const double sigma,const double angle,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the % center pixel. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o angle: apply the effect along this angle. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SketchImage(const Image *image,const double radius, const double sigma,const double angle,ExceptionInfo *exception) { CacheView *random_view; Image *blend_image, *blur_image, *dodge_image, *random_image, *sketch_image; MagickBooleanType status; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif /* Sketch image. 
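    The pencil look is assembled in stages: a double-size field of uniform
    random gray is motion-blurred along the requested angle to suggest
    strokes, edge-detected, normalized, negated, scaled back to 50% of its
    geometry, color-dodge composited over a clone of the original, and
    finally softened by blending the original back in with a "20x80"
    compose:args mix.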
*/ random_image=CloneImage(image,image->columns << 1,image->rows << 1, MagickTrue,exception); if (random_image == (Image *) NULL) return((Image *) NULL); status=MagickTrue; random_info=AcquireRandomInfoThreadSet(); random_view=AcquireAuthenticCacheView(random_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(random_image,random_image,random_image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) random_image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) random_image->columns; x++) { double value; register ssize_t i; if (GetPixelWriteMask(random_image,q) == 0) { q+=GetPixelChannels(random_image); continue; } value=GetPseudoRandomValue(random_info[id]); for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; q[i]=ClampToQuantum(QuantumRange*value); } q+=GetPixelChannels(random_image); } if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse) status=MagickFalse; } random_view=DestroyCacheView(random_view); random_info=DestroyRandomInfoThreadSet(random_info); if (status == MagickFalse) { random_image=DestroyImage(random_image); return(random_image); } blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception); random_image=DestroyImage(random_image); if (blur_image == (Image *) NULL) return((Image *) NULL); dodge_image=EdgeImage(blur_image,radius,exception); blur_image=DestroyImage(blur_image); if (dodge_image == (Image *) NULL) return((Image *) NULL); (void) NormalizeImage(dodge_image,exception); (void) NegateImage(dodge_image,MagickFalse,exception); (void) TransformImage(&dodge_image,(char *) NULL,"50%",exception); sketch_image=CloneImage(image,0,0,MagickTrue,exception); if (sketch_image == (Image *) NULL) { dodge_image=DestroyImage(dodge_image); return((Image *) NULL); } (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp, MagickTrue,0,0,exception); dodge_image=DestroyImage(dodge_image); blend_image=CloneImage(image,0,0,MagickTrue,exception); if (blend_image == (Image *) NULL) { sketch_image=DestroyImage(sketch_image); return((Image *) NULL); } if (blend_image->alpha_trait != BlendPixelTrait) (void) SetImageAlpha(blend_image,TransparentAlpha,exception); (void) SetImageArtifact(blend_image,"compose:args","20x80"); (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue, 0,0,exception); blend_image=DestroyImage(blend_image); return(sketch_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S o l a r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SolarizeImage() applies a special effect to the image, similar to the effect % achieved in a photo darkroom by selectively exposing areas of photo % sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a % measure of the extent of the solarization. 
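%  For example, with a threshold of 0.5*QuantumRange every channel value
%  above mid-gray is replaced by QuantumRange minus itself, inverting the
%  highlights while leaving the shadows untouched.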
%
%  The format of the SolarizeImage method is:
%
%      MagickBooleanType SolarizeImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold:  Define the extent of the solarization.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e g a n o I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SteganoImage() hides a digital watermark within the image.  Recover the
%  hidden watermark later to prove the authenticity of an image.  Offset
%  defines the start position within the image to hide the watermark.
%
%  The format of the SteganoImage method is:
%
%      Image *SteganoImage(const Image *image,Image *watermark,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o watermark: the watermark image.
%
%    o exception: return any errors or warnings in this structure.
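%
%  As a usage sketch (an assumption for illustration, not part of this
%  source): the watermark is typically recovered with the STEGANO coder,
%  e.g. "magick -size WxH+offset stegano:hidden.png watermark.png", where
%  W, H, and offset must match the watermark geometry and image offset
%  used when hiding it.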
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag  "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),
              j,GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S t e r e o A n a g l y p h I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  StereoAnaglyphImage() combines two images and produces a single image that
% 
is the composite of a left and right image of a stereo pair. Special % red-green stereo glasses are required to view this effect. % % The format of the StereoAnaglyphImage method is: % % Image *StereoImage(const Image *left_image,const Image *right_image, % ExceptionInfo *exception) % Image *StereoAnaglyphImage(const Image *left_image, % const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o left_image: the left image. % % o right_image: the right image. % % o exception: return any errors or warnings in this structure. % % o x_offset: amount, in pixels, by which the left image is offset to the % right of the right image. % % o y_offset: amount, in pixels, by which the left image is offset to the % bottom of the right image. % % */ MagickExport Image *StereoImage(const Image *left_image, const Image *right_image,ExceptionInfo *exception) { return(StereoAnaglyphImage(left_image,right_image,0,0,exception)); } MagickExport Image *StereoAnaglyphImage(const Image *left_image, const Image *right_image,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { #define StereoImageTag "Stereo/Image" const Image *image; Image *stereo_image; MagickBooleanType status; ssize_t y; assert(left_image != (const Image *) NULL); assert(left_image->signature == MagickCoreSignature); if (left_image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", left_image->filename); assert(right_image != (const Image *) NULL); assert(right_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(right_image != (const Image *) NULL); image=left_image; if ((left_image->columns != right_image->columns) || (left_image->rows != right_image->rows)) ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer"); /* Initialize stereo image attributes. */ stereo_image=CloneImage(left_image,left_image->columns,left_image->rows, MagickTrue,exception); if (stereo_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse) { stereo_image=DestroyImage(stereo_image); return((Image *) NULL); } (void) SetImageColorspace(stereo_image,sRGBColorspace,exception); /* Copy left image to red channel and right image to blue channel. 
*/ status=MagickTrue; for (y=0; y < (ssize_t) stereo_image->rows; y++) { register const Quantum *magick_restrict p, *magick_restrict q; register ssize_t x; register Quantum *magick_restrict r; p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1, exception); q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception); r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) || (r == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) stereo_image->columns; x++) { SetPixelRed(image,GetPixelRed(left_image,p),r); SetPixelGreen(image,GetPixelGreen(right_image,q),r); SetPixelBlue(image,GetPixelBlue(right_image,q),r); if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0) SetPixelAlpha(image,(GetPixelAlpha(left_image,p)+ GetPixelAlpha(right_image,q))/2,r); p+=GetPixelChannels(left_image); q+=GetPixelChannels(right_image); r+=GetPixelChannels(stereo_image); } if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse) break; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y, stereo_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } if (status == MagickFalse) stereo_image=DestroyImage(stereo_image); return(stereo_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S w i r l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SwirlImage() swirls the pixels about the center of the image, where % degrees indicates the sweep of the arc through which each pixel is moved. % You get a more dramatic effect as the degrees move from 1 to 360. % % The format of the SwirlImage method is: % % Image *SwirlImage(const Image *image,double degrees, % const PixelInterpolateMethod method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o degrees: Define the tightness of the swirling effect. % % o method: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *SwirlImage(const Image *image,double degrees, const PixelInterpolateMethod method,ExceptionInfo *exception) { #define SwirlImageTag "Swirl/Image" CacheView *image_view, *interpolate_view, *swirl_view; Image *swirl_image; MagickBooleanType status; MagickOffsetType progress; double radius; PointInfo center, scale; ssize_t y; /* Initialize swirl image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); swirl_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (swirl_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse) { swirl_image=DestroyImage(swirl_image); return((Image *) NULL); } if (swirl_image->background_color.alpha != OpaqueAlpha) swirl_image->alpha_trait=BlendPixelTrait; /* Compute scaling factor. 
*/ center.x=(double) image->columns/2.0; center.y=(double) image->rows/2.0; radius=MagickMax(center.x,center.y); scale.x=1.0; scale.y=1.0; if (image->columns > image->rows) scale.y=(double) image->columns/(double) image->rows; else if (image->columns < image->rows) scale.x=(double) image->rows/(double) image->columns; degrees=(double) DegreesToRadians(degrees); /* Swirl image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); interpolate_view=AcquireVirtualCacheView(image,exception); swirl_view=AcquireAuthenticCacheView(swirl_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,swirl_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double distance; PointInfo delta; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } delta.y=scale.y*(double) (y-center.y); for (x=0; x < (ssize_t) image->columns; x++) { /* Determine if the pixel is within an ellipse. */ if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(swirl_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(swirl_image); continue; } delta.x=scale.x*(double) (x-center.x); distance=delta.x*delta.x+delta.y*delta.y; if (distance >= (radius*radius)) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait swirl_traits=GetPixelChannelTraits(swirl_image,channel); if ((traits == UndefinedPixelTrait) || (swirl_traits == UndefinedPixelTrait)) continue; SetPixelChannel(swirl_image,channel,p[i],q); } } else { double cosine, factor, sine; /* Swirl the pixel. */ factor=1.0-sqrt((double) distance)/radius; sine=sin((double) (degrees*factor*factor)); cosine=cos((double) (degrees*factor*factor)); status=InterpolatePixelChannels(image,interpolate_view,swirl_image, method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,exception); } p+=GetPixelChannels(image); q+=GetPixelChannels(swirl_image); } if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SwirlImage) #endif proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } swirl_view=DestroyCacheView(swirl_view); interpolate_view=DestroyCacheView(interpolate_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) swirl_image=DestroyImage(swirl_image); return(swirl_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TintImage() applies a color vector to each pixel in the image. The length % of the vector is 0 for black and white and at its maximum for the midtones. 
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5)))) % % The format of the TintImage method is: % % Image *TintImage(const Image *image,const char *blend, % const PixelInfo *tint,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o blend: A color value used for tinting. % % o tint: A color value used for tinting. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *TintImage(const Image *image,const char *blend, const PixelInfo *tint,ExceptionInfo *exception) { #define TintImageTag "Tint/Image" CacheView *image_view, *tint_view; double intensity; GeometryInfo geometry_info; Image *tint_image; MagickBooleanType status; MagickOffsetType progress; PixelInfo color_vector; MagickStatusType flags; ssize_t y; /* Allocate tint image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception); if (tint_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse) { tint_image=DestroyImage(tint_image); return((Image *) NULL); } if ((IsGrayColorspace(image->colorspace) != MagickFalse) && (IsPixelInfoGray(tint) == MagickFalse)) (void) SetImageColorspace(tint_image,sRGBColorspace,exception); if (blend == (const char *) NULL) return(tint_image); /* Determine RGB values of the color. */ GetPixelInfo(image,&color_vector); flags=ParseGeometry(blend,&geometry_info); color_vector.red=geometry_info.rho; color_vector.green=geometry_info.rho; color_vector.blue=geometry_info.rho; color_vector.alpha=(MagickRealType) OpaqueAlpha; if ((flags & SigmaValue) != 0) color_vector.green=geometry_info.sigma; if ((flags & XiValue) != 0) color_vector.blue=geometry_info.xi; if ((flags & PsiValue) != 0) color_vector.alpha=geometry_info.psi; if (image->colorspace == CMYKColorspace) { color_vector.black=geometry_info.rho; if ((flags & PsiValue) != 0) color_vector.black=geometry_info.psi; if ((flags & ChiValue) != 0) color_vector.alpha=geometry_info.chi; } intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint); color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity); color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity); color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity); color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity); color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity); /* Tint image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); tint_view=AcquireAuthenticCacheView(tint_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,tint_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; double weight; register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait tint_traits=GetPixelChannelTraits(tint_image,channel); if ((traits == UndefinedPixelTrait) || (tint_traits == UndefinedPixelTrait)) continue; if (((tint_traits & CopyPixelTrait) != 0) || (GetPixelWriteMask(image,p) == 0)) { SetPixelChannel(tint_image,channel,p[i],q); continue; } } GetPixelInfo(image,&pixel); weight=QuantumScale*GetPixelRed(image,p)-0.5; pixel.red=(double) GetPixelRed(image,p)+color_vector.red*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelGreen(image,p)-0.5; pixel.green=(double) GetPixelGreen(image,p)+color_vector.green*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelBlue(image,p)-0.5; pixel.blue=(double) GetPixelBlue(image,p)+color_vector.blue*(1.0-(4.0* (weight*weight))); weight=QuantumScale*GetPixelBlack(image,p)-0.5; pixel.black=(double) GetPixelBlack(image,p)+color_vector.black*(1.0-(4.0* (weight*weight))); SetPixelViaPixelInfo(tint_image,&pixel,q); p+=GetPixelChannels(image); q+=GetPixelChannels(tint_image); } if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TintImage) #endif proceed=SetImageProgress(image,TintImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } tint_view=DestroyCacheView(tint_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) tint_image=DestroyImage(tint_image); return(tint_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V i g n e t t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % VignetteImage() softens the edges of the image in vignette style. % % The format of the VignetteImage method is: % % Image *VignetteImage(const Image *image,const double radius, % const double sigma,const ssize_t x,const ssize_t y, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o sigma: the standard deviation of the Gaussian, in pixels. % % o x, y: Define the x and y ellipse offset. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *VignetteImage(const Image *image,const double radius, const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception) { char ellipse[MagickPathExtent]; DrawInfo *draw_info; Image *canvas_image, *blur_image, *oval_image, *vignette_image; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); canvas_image=CloneImage(image,0,0,MagickTrue,exception); if (canvas_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(canvas_image,DirectClass,exception) == MagickFalse) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } canvas_image->alpha_trait=BlendPixelTrait; oval_image=CloneImage(canvas_image,canvas_image->columns,canvas_image->rows, MagickTrue,exception); if (oval_image == (Image *) NULL) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } (void) QueryColorCompliance("#000000",AllCompliance, &oval_image->background_color,exception); (void) SetImageBackgroundColor(oval_image,exception); draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill, exception); (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke, exception); (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g," "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x, image->rows/2.0-y); draw_info->primitive=AcquireString(ellipse); (void) DrawImage(oval_image,draw_info,exception); draw_info=DestroyDrawInfo(draw_info); blur_image=BlurImage(oval_image,radius,sigma,exception); oval_image=DestroyImage(oval_image); if (blur_image == (Image *) NULL) { canvas_image=DestroyImage(canvas_image); return((Image *) NULL); } blur_image->alpha_trait=UndefinedPixelTrait; (void) CompositeImage(canvas_image,blur_image,IntensityCompositeOp,MagickTrue, 0,0,exception); blur_image=DestroyImage(blur_image); vignette_image=MergeImageLayers(canvas_image,FlattenLayer,exception); canvas_image=DestroyImage(canvas_image); if (vignette_image != (Image *) NULL) (void) TransformImageColorspace(vignette_image,image->colorspace,exception); return(vignette_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveImage() creates a "ripple" effect in the image by shifting the pixels % vertically along a sine wave whose amplitude and wavelength is specified % by the given parameters. % % The format of the WaveImage method is: % % Image *WaveImage(const Image *image,const double amplitude, % const double wave_length,const PixelInterpolateMethod method, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o amplitude, wave_length: Define the amplitude and wave length of the % sine wave. % % o interpolate: the pixel interpolation method. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *WaveImage(const Image *image,const double amplitude, const double wave_length,const PixelInterpolateMethod method, ExceptionInfo *exception) { #define WaveImageTag "Wave/Image" CacheView *image_view, *wave_view; Image *wave_image; MagickBooleanType status; MagickOffsetType progress; double *sine_map; register ssize_t i; ssize_t y; /* Initialize wave image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0* fabs(amplitude)),MagickTrue,exception); if (wave_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse) { wave_image=DestroyImage(wave_image); return((Image *) NULL); } if (wave_image->background_color.alpha != OpaqueAlpha) wave_image->alpha_trait=BlendPixelTrait; /* Allocate sine map. */ sine_map=(double *) AcquireQuantumMemory((size_t) wave_image->columns, sizeof(*sine_map)); if (sine_map == (double *) NULL) { wave_image=DestroyImage(wave_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (i=0; i < (ssize_t) wave_image->columns; i++) sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/ wave_length)); /* Wave image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); wave_view=AcquireAuthenticCacheView(wave_image,exception); (void) SetCacheViewVirtualPixelMethod(image_view, BackgroundVirtualPixelMethod); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,wave_image,wave_image->rows,1) #endif for (y=0; y < (ssize_t) wave_image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) wave_image->columns; x++) { status=InterpolatePixelChannels(image,image_view,wave_image,method, (double) x,(double) (y-sine_map[x]),q,exception); q+=GetPixelChannels(wave_image); } if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_WaveImage) #endif proceed=SetImageProgress(image,WaveImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } wave_view=DestroyCacheView(wave_view); image_view=DestroyCacheView(image_view); sine_map=(double *) RelinquishMagickMemory(sine_map); if (status == MagickFalse) wave_image=DestroyImage(wave_image); return(wave_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W a v e l e t D e n o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WaveletDenoiseImage() removes noise from the image using a wavelet % transform. The wavelet transform is a fast hierarchical scheme for % processing an image using a set of consecutive lowpass and high_pass filters, % followed by a decimation. 
This results in a decomposition into different % scales which can be regarded as different “frequency bands”, determined by % the mother wavelet. Adapted from dcraw.c by David Coffin. % % The format of the WaveletDenoiseImage method is: % % Image *WaveletDenoiseImage(const Image *image,const double threshold, % const double softness,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: set the threshold for smoothing. % % o softness: attenuate the smoothing threshold. % % o exception: return any errors or warnings in this structure. % */ static inline void HatTransform(const float *magick_restrict pixels, const size_t stride,const size_t extent,const size_t scale,float *kernel) { const float *magick_restrict p, *magick_restrict q, *magick_restrict r; register ssize_t i; p=pixels; q=pixels+scale*stride; r=pixels+scale*stride; for (i=0; i < (ssize_t) scale; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q-=stride; r+=stride; } for ( ; i < (ssize_t) (extent-scale); i++) { kernel[i]=0.25f*(2.0f*(*p)+*(p-scale*stride)+*(p+scale*stride)); p+=stride; } q=p-scale*stride; r=pixels+stride*(extent-2); for ( ; i < (ssize_t) extent; i++) { kernel[i]=0.25f*(*p+(*p)+(*q)+(*r)); p+=stride; q+=stride; r-=stride; } } MagickExport Image *WaveletDenoiseImage(const Image *image, const double threshold,const double softness,ExceptionInfo *exception) { CacheView *image_view, *noise_view; float *kernel, *pixels; Image *noise_image; MagickBooleanType status; MagickSizeType number_pixels; MemoryInfo *pixels_info; ssize_t channel; static const float noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f, 0.0080f, 0.0044f }; /* Initialize noise image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception); if (noise_image != (Image *) NULL) return(noise_image); #endif noise_image=CloneImage(image,0,0,MagickTrue,exception); if (noise_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse) { noise_image=DestroyImage(noise_image); return((Image *) NULL); } if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); pixels_info=AcquireVirtualMemory(3*image->columns,image->rows* sizeof(*pixels)); kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns), GetOpenMPMaximumThreads()*sizeof(*kernel)); if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL)) { if (kernel != (float *) NULL) kernel=(float *) RelinquishMagickMemory(kernel); if (pixels_info != (MemoryInfo *) NULL) pixels_info=RelinquishVirtualMemory(pixels_info); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } pixels=(float *) GetVirtualMemoryBlob(pixels_info); status=MagickTrue; number_pixels=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); noise_view=AcquireAuthenticCacheView(noise_image,exception); for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++) { register ssize_t i; size_t high_pass, low_pass; ssize_t level, y; PixelChannel pixel_channel; PixelTrait 
traits; if (status == MagickFalse) continue; traits=GetPixelChannelTraits(image,(PixelChannel) channel); if (traits == UndefinedPixelTrait) continue; pixel_channel=GetPixelChannelChannel(image,channel); if ((pixel_channel != RedPixelChannel) && (pixel_channel != GreenPixelChannel) && (pixel_channel != BluePixelChannel)) continue; /* Copy channel from image to wavelet pixel array. */ i=0; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; ssize_t x; p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { pixels[i++]=(float) p[channel]; p+=GetPixelChannels(image); } } /* Low pass filter outputs are called approximation kernel & high pass filters are referred to as detail kernel. The detail kernel have high values in the noisy parts of the signal. */ high_pass=0; for (level=0; level < 5; level++) { double magnitude; ssize_t x, y; low_pass=(size_t) (number_pixels*((level & 0x01)+1)); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t x; p=kernel+id*image->columns; q=pixels+y*image->columns; HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p); q+=low_pass; for (x=0; x < (ssize_t) image->columns; x++) *q++=(*p++); } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,1) \ magick_threads(image,image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register float *magick_restrict p, *magick_restrict q; register ssize_t y; p=kernel+id*image->rows; q=pixels+x+low_pass; HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p); for (y=0; y < (ssize_t) image->rows; y++) { *q=(*p++); q+=image->columns; } } /* To threshold, each coefficient is compared to a threshold value and attenuated / shrunk by some factor. */ magnitude=threshold*noise_levels[level]; for (i=0; i < (ssize_t) number_pixels; ++i) { pixels[high_pass+i]-=pixels[low_pass+i]; if (pixels[high_pass+i] < -magnitude) pixels[high_pass+i]+=magnitude-softness*magnitude; else if (pixels[high_pass+i] > magnitude) pixels[high_pass+i]-=magnitude-softness*magnitude; else pixels[high_pass+i]*=softness; if (high_pass != 0) pixels[i]+=pixels[high_pass+i]; } high_pass=low_pass; } /* Reconstruct image from the thresholded wavelet kernel. 
*/ i=0; for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; ssize_t offset; q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } offset=GetPixelChannelOffset(noise_image,pixel_channel); for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType pixel; pixel=(MagickRealType) pixels[i]+pixels[low_pass+i]; q[offset]=ClampToQuantum(pixel); i++; q+=GetPixelChannels(noise_image); } sync=SyncCacheViewAuthenticPixels(noise_view,exception); if (sync == MagickFalse) status=MagickFalse; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType) channel,GetPixelChannels(image)); if (proceed == MagickFalse) status=MagickFalse; } } noise_view=DestroyCacheView(noise_view); image_view=DestroyCacheView(image_view); kernel=(float *) RelinquishMagickMemory(kernel); pixels_info=RelinquishVirtualMemory(pixels_info); if (status == MagickFalse) noise_image=DestroyImage(noise_image); return(noise_image); }
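/*
  Usage sketch (appended for illustration; not part of the MagickCore
  sources).  It exercises the SwirlImage() and WaveImage() signatures
  documented above.  MagickCoreGenesis(), AcquireImageInfo(), ReadImage(),
  WriteImage() and friends are the standard MagickCore bootstrap API, but the
  scaffolding below is deliberately minimal, with error handling reduced to
  bail-outs.  Build roughly as:

      cc demo.c $(pkg-config --cflags --libs MagickCore)
*/
#ifdef EFFECTS_USAGE_SKETCH
#include <stdio.h>
#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image,
    *swirl_image,
    *wave_image;

  ImageInfo
    *image_info;

  if (argc != 3)
    {
      (void) fprintf(stderr,"usage: %s input output\n",argv[0]);
      return(1);
    }
  MagickCoreGenesis(argv[0],MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,argv[1],MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image == (Image *) NULL)
    return(1);
  /*
    A 180 degree swirl about the center, then a gentle sine ripple
    (amplitude 10 pixels, wavelength 150 pixels).
  */
  swirl_image=SwirlImage(image,180.0,BilinearInterpolatePixel,exception);
  image=DestroyImage(image);
  if (swirl_image == (Image *) NULL)
    return(1);
  wave_image=WaveImage(swirl_image,10.0,150.0,BilinearInterpolatePixel,
    exception);
  swirl_image=DestroyImage(swirl_image);
  if (wave_image == (Image *) NULL)
    return(1);
  (void) CopyMagickString(wave_image->filename,argv[2],MagickPathExtent);
  (void) WriteImage(image_info,wave_image,exception);
  wave_image=DestroyImage(wave_image);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}
#endif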
common.h
#pragma once /// MIT License /// Copyright (c) 2021 Thanh Long Le (longlt00502@gmail.com) #if (HAVE_OPENMP) #include <functional> #include <vector> #include <atomic> #include <thread> #include <chrono> #include <fstream> #include <cassert> #include <condition_variable> #include <mutex> #include <cstring> #include <string> #include <omp.h> #include <tll.h> extern bool gPlotting; #define NOP_LOOP() for(int i__=0; i__<0x100; i__++) __asm__("nop") namespace tll::test { template <int num_of_threads> std::pair<size_t, size_t> fifing( const std::function<size_t(int, size_t, size_t, size_t, size_t)> &do_push, /// true == producer const std::function<size_t(int, size_t, size_t, size_t, size_t)> &do_pop, /// true == producer const std::function<bool(int)> &is_prod, /// true == producer const std::function<void()> &do_wait, /// std::this_thread::yield() const std::function<void()> &do_dump, size_t max_size, // std::vector<double> &speed_lst, // std::vector<double> &throughputs, std::vector<double> &time_lst, std::vector<size_t> &tt_count_lst, bool scale=true) { static_assert(num_of_threads > 0); std::atomic<size_t> running_prod{0}, tt_push_count{0}, tt_pop_count{0}, tt_push_size{0}, tt_pop_size{0}; tll::util::Counter<1, std::chrono::duration<double, std::ratio<1, 1>>> counter; /// second // double tt_time; std::atomic<bool> is_done{false}; std::condition_variable cv; std::mutex cv_m; std::thread dumpt = std::thread{[&](){ std::unique_lock<std::mutex> lk(cv_m); while(! cv.wait_for(lk, std::chrono::seconds(10), [&]{return is_done.load();}) ) { do_dump(); } }}; counter.start(); #pragma omp parallel num_threads ( num_of_threads ) { const int kTid = omp_get_thread_num(); size_t loop_num = 0; size_t lc_tt_size = 0; if(is_prod(kTid)) /// Producer { LOGVB("Producer: %s", tll::util::str_tidcpu().data()); running_prod.fetch_add(1); for(;tt_push_size.load(std::memory_order_relaxed) < (max_size);) { size_t ret = do_push(kTid, loop_num, lc_tt_size, tt_push_count.load(std::memory_order_relaxed), tt_push_size.load(std::memory_order_relaxed)); if(ret) { tt_push_count.fetch_add(1, std::memory_order_relaxed); tt_push_size.fetch_add(ret, std::memory_order_relaxed); lc_tt_size+=ret; // NOP_LOOP(); do_wait(); } loop_num++; } running_prod.fetch_add(-1); // LOGD("Prod Done"); } else /// Consumer { LOGVB("Consumer: %s", tll::util::str_tidcpu().data()); for(; running_prod.load(std::memory_order_relaxed) > 0 || (tt_pop_count.load(std::memory_order_relaxed) < tt_push_count.load(std::memory_order_relaxed)) || (tt_pop_size.load(std::memory_order_relaxed) < max_size) ;) { size_t ret = do_pop(kTid, loop_num, lc_tt_size, tt_pop_count.load(std::memory_order_relaxed), tt_pop_size.load(std::memory_order_relaxed)); if(ret) { tt_pop_count.fetch_add(1, std::memory_order_relaxed); tt_pop_size.fetch_add(ret, std::memory_order_relaxed); lc_tt_size+=ret; // NOP_LOOP(); do_wait(); } loop_num++; } // LOGD("Cons Done"); } LOGVB(" - Done: %s", tll::util::str_tidcpu().data()); } double tt_time = counter.elapsed().count(); if(do_push) { tt_count_lst.push_back(tt_push_count.load()); // speed_lst.push_back(tt_push_size.load() / tt_time); // throughputs.push_back(tt_push_count.load() / tt_time); } if(do_pop) { tt_count_lst.push_back(tt_pop_count.load()); // speed_lst.push_back(tt_pop_size.load() / tt_time); // throughputs.push_back(tt_push_count.load() / tt_time); } if(scale) tt_time /= num_of_threads; time_lst.push_back(tt_time); // LOGD("%f", tt_time); is_done.store(true); cv.notify_all(); dumpt.join(); return {tt_push_size.load(), 
tt_pop_size.load()}; } template <typename T> void plot_data(const std::string &file_name, const std::string &title, const std::string &y_title, std::vector<std::string> &column_lst, const std::vector<int> &x_axes, const std::vector<T> &data) { if(!gPlotting) return; std::ofstream ofs{file_name, std::ios::out | std::ios::binary}; assert(ofs.is_open()); // assert(x_axes.size() == column_lst.size()); // assert(x_axes.size() * column_lst.size() == data.size()); size_t col_size = data.size() / x_axes.size(); if(column_lst.size() < col_size) { for(size_t i=column_lst.size(); i<col_size; i++) { column_lst.push_back(std::to_string(i)); } } // ofs << tll::util::stringFormat("#%d\n", column_lst.size()); // ofs << tll::util::stringFormat("#%d\n", NUM_CPU); // auto col_size = column_lst.size(); // auto x_size = data.size() / col_size; ofs << "#" << col_size << "\n"; ofs << "#" << NUM_CPU << "\n"; ofs << "#" << title << "\n"; ofs << "#" << y_title << "\n"; ofs << "#Number of threads\n"; ofs << "\"\" "; for(int i=0; i<col_size; i++) { ofs << "\"" << column_lst[i] << "\" "; } ofs << "\n"; // LOGD("%ld %ld", col_size, x_axes.size()); for(int i=0; i<x_axes.size(); i++) { ofs << x_axes[i] << " "; for(int j=0; j<col_size; j++) ofs << data[i*col_size + j] << " "; ofs << "\n"; } ofs.flush(); // return; int cmd = 0; cmd = system(tll::util::stringFormat("./plothist.sh %s", file_name.data()).data()); cmd = system(tll::util::stringFormat("./plot.sh %s", file_name.data()).data()); } } /// tll::test #endif
GB_unaryop__minv_int32_uint64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int32_uint64 // op(A') function: GB_tran__minv_int32_uint64 // C type: int32_t // A type: uint64_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 32) #define GB_ATYPE \ uint64_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 32) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT32 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int32_uint64 ( int32_t *restrict Cx, const uint64_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int32_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
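/*
  Standalone sketch (not part of the generated GraphBLAS sources) of the
  pattern GB_unop__minv_int32_uint64 follows: typecast each entry, apply the
  unary op, and split the flat loop across threads with an OpenMP parallel
  for.  The real GB_IMINV_SIGNED macro is defined elsewhere in the GraphBLAS
  headers; iminv32() below is a hypothetical stand-in whose divide-by-zero
  policy (saturate to INT32_MAX) is an assumption, not GraphBLAS's documented
  behavior.
*/
#include <stdint.h>
#include <stdio.h>

static inline int32_t iminv32 (int32_t x)
{
    /* integer multiplicative inverse: 1/1 = 1, 1/-1 = -1, otherwise 0 ;
       0 has no inverse, so saturate (assumed policy) */
    if (x == 0) return (INT32_MAX) ;
    return ((int32_t) (1 / x)) ;
}

static void unop_minv_int32_uint64_sketch (int32_t *Cx, const uint64_t *Ax,
    int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        int32_t x = (int32_t) Ax [p] ;  /* cast: int32_t cij = (int32_t) aij */
        Cx [p] = iminv32 (x) ;          /* unaryop */
    }
}

int main (void)
{
    /* UINT64_MAX converts to -1 on the usual two's-complement targets */
    uint64_t A [4] = { 0, 1, 2, UINT64_MAX } ;
    int32_t  C [4] ;
    unop_minv_int32_uint64_sketch (C, A, 4, 2) ;
    for (int p = 0 ; p < 4 ; p++)
        printf ("C[%d] = %d\n", p, (int) C [p]) ;
    return (0) ;
}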
pr79428-5.c
/* PR c/79428 */
/* { dg-options "-fopenmp" } */

#pragma omp ordered /* { dg-error "expected declaration specifiers before end of line" } */
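/*
  For contrast with the invalid file-scope directive above (which the test
  expects GCC to reject): "ordered" is only meaningful inside a loop region
  declared with the ordered clause.  A minimal well-formed use, for
  illustration only (not part of the testsuite):
*/
#include <stdio.h>

int
main (void)
{
  int i;
#pragma omp parallel for ordered
  for (i = 0; i < 8; i++)
    {
      /* iteration bodies run in parallel up to this point */
#pragma omp ordered
      printf ("%d\n", i);	/* but this executes in iteration order */
    }
  return 0;
}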
reduction.c
/*
  1. A local copy of the reduction variable is made and initialized according
     to the operator (e.g. 0 for +).
  2. The compiler finds standard reduction expressions containing the operator
     and uses them to update the local copy.
  3. The local copies are reduced into a single value and combined with the
     original global value.
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

int main()
{
  int i;
  long sum=0;
  int total=100;

  /* version 1: compiler-generated reduction */
#pragma omp parallel for reduction(+:sum)
  for (i=0; i<= total; i++){
    sum = sum + i;
  }
  printf("sum of 1 to %d = %ld (reduction clause)\n",total,sum);

  /* version 2: hand-written equivalent -- private partial sums combined
     in a critical section; reset sum so it starts from the same state
     instead of accumulating on top of version 1's result */
  long sum0;
  sum = 0;
#pragma omp parallel private(sum0)
  {
    sum0=0;
#pragma omp for private(i)
    for (i=0; i<= total; i++)
      sum0=sum0+i;
#pragma omp critical
    sum = sum + sum0;
  }
  printf("sum of 1 to %d = %ld (manual reduction)\n",total,sum);
  return 0;
}
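/*
  Companion sketch (not part of the original example): the initial value of
  each thread's local copy depends on the operator -- 0 for "+", 1 for "*".
  A product reduction makes that visible: every private copy of fact starts
  at 1, and the copies are multiplied together (and into the original value)
  when the region ends.
*/
long long product_reduction_demo(void)
{
  int i;
  long long fact = 1;
#pragma omp parallel for reduction(*:fact)
  for (i = 1; i <= 10; i++)
    fact = fact * i;
  return fact;   /* 10! = 3628800 */
}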
gru_utils.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include "lite/arm/math/sgemm.h" namespace paddle { namespace lite { namespace arm { namespace math { template <typename T> struct GRUMetaValue { T* gate_weight; T* state_weight; T* gate_value; T* reset_output_value; T* output_value; T* prev_out_value; }; template <typename Dtype> inline void gru_add_with_bias( const Dtype* din, const Dtype* bias, Dtype* dout, int batch, int size); template <> inline void gru_add_with_bias( const float* din, const float* bias, float* dout, int batch, int size) { #pragma omp parallel for for (int i = 0; i < batch; ++i) { int j = 0; auto din_batch = din + i * size; auto dout_batch = dout + i * size; float32x4_t vb0 = vld1q_f32(bias); float32x4_t vin0 = vld1q_f32(din_batch); float32x4_t vout0; float32x4_t vout1; float32x4_t vin1; float32x4_t vb1; for (; j < size - 7; j += 8) { vin1 = vld1q_f32(din_batch + j + 4); vb1 = vld1q_f32(bias + j + 4); vout0 = vaddq_f32(vb0, vin0); vout1 = vaddq_f32(vb1, vin1); vb0 = vld1q_f32(bias + j + 8); vin0 = vld1q_f32(din_batch + j + 8); vst1q_f32(dout_batch + j, vout0); vst1q_f32(dout_batch + j + 4, vout1); } for (; j < size; ++j) { dout_batch[j] = din_batch[j] + bias[j]; } } } template <lite_api::ActivationType Act> static void gru_unit_reset_act_impl(float* updata_gate, int stride_update, float* reset_gate, int stride_reset, const float* hidden_prev, int stride_hidden_prev, float* reset_hidden_prev, int stride_reset_hidden_prev, int frame_size, int batch_size) { #pragma omp parallel for for (int b = 0; b < batch_size; ++b) { float32x4_t vpre0 = vdupq_n_f32(0.f); float32x4_t vpre1 = vdupq_n_f32(0.f); float prev = 0.f; int i = 0; for (; i < frame_size - 7; i += 8) { float32x4_t vu0 = vld1q_f32(updata_gate + i); float32x4_t vu1 = vld1q_f32(updata_gate + i + 4); float32x4_t vr0 = vld1q_f32(reset_gate + i); float32x4_t vr1 = vld1q_f32(reset_gate + i + 4); float32x4_t vau0 = lite::arm::math::vactive_f32<Act>(vu0); float32x4_t vau1 = lite::arm::math::vactive_f32<Act>(vu1); if (hidden_prev) { vpre0 = vld1q_f32(hidden_prev + i); vpre1 = vld1q_f32(hidden_prev + i + 4); } float32x4_t var0 = lite::arm::math::vactive_f32<Act>(vr0); float32x4_t var1 = lite::arm::math::vactive_f32<Act>(vr1); vst1q_f32(updata_gate + i, vau0); vst1q_f32(updata_gate + i + 4, vau1); float32x4_t vres0 = vmulq_f32(vpre0, var0); float32x4_t vres1 = vmulq_f32(vpre1, var1); vst1q_f32(reset_gate + i, var0); vst1q_f32(reset_gate + i + 4, var1); vst1q_f32(reset_hidden_prev + i, vres0); vst1q_f32(reset_hidden_prev + i + 4, vres1); } for (; i < frame_size; ++i) { updata_gate[i] = lite::arm::math::active_f32<Act>(updata_gate[i]); reset_gate[i] = lite::arm::math::active_f32<Act>(reset_gate[i]); if (hidden_prev) { prev = hidden_prev[i]; } reset_hidden_prev[i] = reset_gate[i] * prev; } updata_gate += stride_update; reset_gate += stride_reset; if (hidden_prev) { hidden_prev += stride_hidden_prev; } reset_hidden_prev += stride_reset_hidden_prev; } } 
template <lite_api::ActivationType Act> static void gru_unit_out_act_impl(bool origin_mode, float* updata_gate, int stride_update, float* cell_state, int stride_cell_state, const float* hidden_prev, int stride_hidden_prev, float* hidden, int stride_hidden, int frame_size, int batch_size) { #pragma omp parallel for for (int b = 0; b < batch_size; ++b) { float32x4_t vpre0 = vdupq_n_f32(0.f); float32x4_t vpre1 = vdupq_n_f32(0.f); float prev = 0.f; int i = 0; if (origin_mode) { for (; i < frame_size - 7; i += 8) { float32x4_t vc0 = vld1q_f32(cell_state + i); float32x4_t vc1 = vld1q_f32(cell_state + i + 4); float32x4_t vu0 = vld1q_f32(updata_gate + i); float32x4_t vu1 = vld1q_f32(updata_gate + i + 4); float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0); float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1); if (hidden_prev) { vpre0 = vld1q_f32(hidden_prev + i); vpre1 = vld1q_f32(hidden_prev + i + 4); } float32x4_t vh0 = vmlsq_f32(vac0, vu0, vac0); float32x4_t vh1 = vmlsq_f32(vac1, vu1, vac1); vst1q_f32(cell_state + i, vac0); vst1q_f32(cell_state + i + 4, vac1); vh0 = vmlaq_f32(vh0, vu0, vpre0); vh1 = vmlaq_f32(vh1, vu1, vpre1); vst1q_f32(hidden + i, vh0); vst1q_f32(hidden + i + 4, vh1); } for (; i < frame_size; ++i) { if (hidden_prev) { prev = hidden_prev[i]; } cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]); hidden[i] = cell_state[i] * (1.f - updata_gate[i]) + updata_gate[i] * prev; } } else { for (; i < frame_size - 7; i += 8) { float32x4_t vc0 = vld1q_f32(cell_state + i); float32x4_t vc1 = vld1q_f32(cell_state + i + 4); float32x4_t vu0 = vld1q_f32(updata_gate + i); float32x4_t vu1 = vld1q_f32(updata_gate + i + 4); float32x4_t vac0 = lite::arm::math::vactive_f32<Act>(vc0); float32x4_t vac1 = lite::arm::math::vactive_f32<Act>(vc1); if (hidden_prev) { vpre0 = vld1q_f32(hidden_prev + i); vpre1 = vld1q_f32(hidden_prev + i + 4); } float32x4_t vh0 = vmlsq_f32(vpre0, vpre0, vu0); float32x4_t vh1 = vmlsq_f32(vpre1, vpre1, vu1); vst1q_f32(cell_state + i, vac0); vst1q_f32(cell_state + i + 4, vac1); vh0 = vmlaq_f32(vh0, vu0, vac0); vh1 = vmlaq_f32(vh1, vu1, vac1); vst1q_f32(hidden + i, vh0); vst1q_f32(hidden + i + 4, vh1); } for (; i < frame_size; ++i) { cell_state[i] = lite::arm::math::active_f32<Act>(cell_state[i]); if (hidden_prev) { prev = hidden_prev[i]; } hidden[i] = prev * (1.f - updata_gate[i]) + updata_gate[i] * cell_state[i]; } } updata_gate += stride_update; cell_state += stride_cell_state; if (hidden_prev) { hidden_prev += stride_hidden_prev; } hidden += stride_hidden; } } inline void gru_unit_reset_act(lite_api::ActivationType act_type, GRUMetaValue<float> value, int frame_size, int batch_size) { auto updata_gate = value.gate_value; auto reset_gate = value.gate_value + frame_size; auto hidden_prev = value.prev_out_value; auto reset_hidden_prev = value.reset_output_value; int stride_update = 3 * frame_size; int stride_reset = 3 * frame_size; int stride_hidden_prev = frame_size; int stride_reset_hidden_prev = frame_size; switch (act_type) { case lite_api::ActivationType::kIndentity: gru_unit_reset_act_impl<lite_api::ActivationType::kIndentity>( updata_gate, stride_update, reset_gate, stride_reset, hidden_prev, stride_hidden_prev, reset_hidden_prev, stride_reset_hidden_prev, frame_size, batch_size); break; case lite_api::ActivationType::kTanh: gru_unit_reset_act_impl<lite_api::ActivationType::kTanh>( updata_gate, stride_update, reset_gate, stride_reset, hidden_prev, stride_hidden_prev, reset_hidden_prev, stride_reset_hidden_prev, frame_size, batch_size); break; case 
lite_api::ActivationType::kSigmoid: gru_unit_reset_act_impl<lite_api::ActivationType::kSigmoid>( updata_gate, stride_update, reset_gate, stride_reset, hidden_prev, stride_hidden_prev, reset_hidden_prev, stride_reset_hidden_prev, frame_size, batch_size); break; case lite_api::ActivationType::kRelu: gru_unit_reset_act_impl<lite_api::ActivationType::kRelu>( updata_gate, stride_update, reset_gate, stride_reset, hidden_prev, stride_hidden_prev, reset_hidden_prev, stride_reset_hidden_prev, frame_size, batch_size); break; default: break; } } inline void gru_unit_out_act(lite_api::ActivationType act_type, bool origin_mode, GRUMetaValue<float> value, int frame_size, int batch_size) { auto updata_gate = value.gate_value; auto cell_state = value.gate_value + 2 * frame_size; auto hidden_prev = value.prev_out_value; auto hidden = value.output_value; int stride_update = 3 * frame_size; int stride_cell_state = 3 * frame_size; int stride_hidden_prev = frame_size; int stride_hidden = frame_size; switch (act_type) { case lite_api::ActivationType::kIndentity: gru_unit_out_act_impl<lite_api::ActivationType::kIndentity>( origin_mode, updata_gate, stride_update, cell_state, stride_cell_state, hidden_prev, stride_hidden_prev, hidden, stride_hidden, frame_size, batch_size); break; case lite_api::ActivationType::kTanh: gru_unit_out_act_impl<lite_api::ActivationType::kTanh>(origin_mode, updata_gate, stride_update, cell_state, stride_cell_state, hidden_prev, stride_hidden_prev, hidden, stride_hidden, frame_size, batch_size); break; case lite_api::ActivationType::kSigmoid: gru_unit_out_act_impl<lite_api::ActivationType::kSigmoid>( origin_mode, updata_gate, stride_update, cell_state, stride_cell_state, hidden_prev, stride_hidden_prev, hidden, stride_hidden, frame_size, batch_size); break; case lite_api::ActivationType::kRelu: gru_unit_out_act_impl<lite_api::ActivationType::kRelu>(origin_mode, updata_gate, stride_update, cell_state, stride_cell_state, hidden_prev, stride_hidden_prev, hidden, stride_hidden, frame_size, batch_size); break; default: break; } } template <typename T> struct GRUUnitFunctor { static void compute(GRUMetaValue<T> value, int frame_size, int batch_size, const lite_api::ActivationType active_node, const lite_api::ActivationType active_gate, bool origin_mode, ARMContext* ctx) { if (value.prev_out_value) { sgemm(false, false, batch_size, frame_size * 2, frame_size, 1.f, value.prev_out_value, frame_size, value.gate_weight, frame_size * 2, 1.f, value.gate_value, frame_size * 3, nullptr, false, false, ctx); } gru_unit_reset_act(active_gate, value, frame_size, batch_size); if (value.prev_out_value) { sgemm(false, false, batch_size, frame_size, frame_size, 1.f, value.reset_output_value, frame_size, value.state_weight, frame_size, 1.f, value.gate_value + frame_size * 2, frame_size * 3, nullptr, false, false, ctx); } gru_unit_out_act(active_node, origin_mode, value, frame_size, batch_size); } }; } // namespace math } // namespace arm } // namespace lite } // namespace paddle
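/*
  Scalar reference sketch (not part of the Paddle Lite sources) of what the
  NEON kernels above compute for one batch row.  `gate` holds the three
  interleaved gate pre-activations [update | reset | candidate], each of
  length frame_size, matching the gate_value layout used by
  gru_unit_reset_act()/gru_unit_out_act().  Activations are fixed to
  sigmoid/tanh here for readability; origin_mode selects between the two
  blending orders implemented in gru_unit_out_act_impl().
*/
#include <math.h>

static inline void gru_cell_reference(float* gate,
                                      const float* h_prev,
                                      float* reset_h,
                                      float* h_out,
                                      int frame_size,
                                      bool origin_mode) {
  float* u = gate;                   // update gate
  float* r = gate + frame_size;      // reset gate
  float* c = gate + 2 * frame_size;  // candidate / cell state
  for (int i = 0; i < frame_size; ++i) {
    float prev = h_prev ? h_prev[i] : 0.f;
    u[i] = 1.f / (1.f + expf(-u[i]));  // act_gate
    r[i] = 1.f / (1.f + expf(-r[i]));
    reset_h[i] = r[i] * prev;  // feeds the second sgemm (state_weight)
  }
  // ... in GRUUnitFunctor the state_weight GEMM updates c[] from reset_h here ...
  for (int i = 0; i < frame_size; ++i) {
    float prev = h_prev ? h_prev[i] : 0.f;
    c[i] = tanhf(c[i]);  // act_node
    h_out[i] = origin_mode ? c[i] * (1.f - u[i]) + u[i] * prev
                           : prev * (1.f - u[i]) + u[i] * c[i];
  }
}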
sinoscope_openmp.c
/* * sinoscope_openmp.c * * Created on: 2011-10-14 * Author: francis */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include "sinoscope.h" #include "color.h" #include "util.h" int sinoscope_image_openmp(sinoscope_t *ptr) { //TODO("sinoscope_image_openmp"); if (ptr == NULL) { return -1; } sinoscope_t b = *ptr; int x, y, index, taylor; struct rgb c; float val, px, py; #pragma omp parallel for private(x, y, c, val, px, py, taylor, index) shared(b) for (x = 1; x < b.height - 1; ++x) { for (y = 1; y < b.width - 1; ++y) { val = 0.0f; px = b.dx * y - 2 * M_PI; py = b.dy * x - 2 * M_PI; for (taylor = 1; taylor <= b.taylor; taylor += 2) { val += sin(px * taylor * b.phase1 + b.time) / taylor + cos(py * taylor * b.phase0) / taylor; } val = (atan(1.0 * val) - atan(-1.0 * val)) / (M_PI); val = (val + 1) * 100; value_color(&c, val, b.interval, b.interval_inv); index = (y * 3) + (x * 3) * b.width; b.buf[index + 0] = c.r; b.buf[index + 1] = c.g; b.buf[index + 2] = c.b; } } return 0; }
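/*
 * Side note (illustrative sketch, not part of the assignment solution): the
 * two loops above are perfectly nested with fully independent iterations, so
 * the same parallelization can also be written with OpenMP's collapse clause,
 * which merges both loops into a single iteration space for better load
 * balancing on wide machines.  Both loop variables are implicitly private in
 * that form.  Minimal self-contained shape:
 */
void sinoscope_collapse_shape(unsigned char *buf, int width, int height)
{
    int x, y;
    #pragma omp parallel for collapse(2)
    for (x = 1; x < height - 1; ++x)
        for (y = 1; y < width - 1; ++y)
            /* same index arithmetic as sinoscope_image_openmp */
            buf[(y * 3) + (x * 3) * width] = (unsigned char)((x + y) & 0xff);
}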
conv_kernel_x86.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: quanwang@openailab.com */ #include "conv_kernel_x86.h" #include "wino_conv_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #if __AVX__ #include <immintrin.h> #endif #ifndef _MSC_VER #include <sys/time.h> #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) #endif static int get_private_mem_size(struct tensor* filter) { if (filter->data_type == TENGINE_DT_UINT8) // simulator uint8 inference with fp32 return filter->elem_num * filter->elem_size * 4; else return filter->elem_num * filter->elem_size; // caution } static void interleave(struct tensor* filter, struct conv_priv_info* priv_info) { /* simply copy the data */ memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size); } static void interleave_uint8(struct tensor* filter, struct conv_priv_info* priv_info) { /* dequant uint8 weight to fp32 for simulator */ float* weight_fp32 = (float* )priv_info->interleave_buffer; uint8_t* weight_uint8 = (uint8_t*)filter->data; float scale = filter->scale; int zero_point = filter->zero_point; for (int i = 0; i < filter->elem_num; i++) { weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale; } } void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h, int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw) { const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; *(out++) = *in; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_uint8(uint8_t* data_img, float* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / 
param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; float scale = input_tensor->scale; int zero_point = input_tensor->zero_point; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; float* out = data_col + (c * outh + h) * outw; const float* end = out + w_high; if (im_row >= 0 && im_row < inh) { uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(float)); out += w_low; while (out < end) { in += sw; float in_fp32 = ((float)in[0] - (float)zero_point) * scale; out[0] = in_fp32; out++; } memset(out, 0, (outw - w_high) * sizeof(float)); } else { memset(out, 0, outw * sizeof(float)); } } } } void im2col_int8(int8_t* data_img, int8_t* data_col, struct tensor* input_tensor, struct tensor* output_tensor, struct conv_param* param) { int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int inc = param->input_channel / param->group; int sh = param->stride_h; int sw = param->stride_w; int ph = param->pad_h0; int pw = param->pad_w0; int dh = param->dilation_h; int dw = param->dilation_w; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; const int channels_col = ksize_h * ksize_w * inc; for (int c = 0; c < channels_col; ++c) { const int kw = c % ksize_w; int c_ = c / ksize_w; const int kh = c_ % ksize_h; c_ = c_ / ksize_h; const int im_col = kw * dw - pw; const int w_low = max(0, -im_col / sw + (-im_col % sw > 0)); const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0)); for (int h = 0; h < outh; ++h) { const int im_row = kh * dh + h * sh - ph; int8_t * out = data_col + (c * outh + h) * outw; const int8_t * end = out + w_high; if (im_row >= 0 && im_row < inh) { int8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw; memset(out, 0, w_low * sizeof(int8_t)); out += w_low; while (out < end) { in += sw; out[0] = in[0]; out++; } memset(out, 0, (outw - w_high) * sizeof(int8_t)); } else { memset(out, 0, outw * sizeof(int8_t)); } } } } static void im2col_ir(struct tensor* input, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group) { int input_chan = param->input_channel / param->group; int image_size = input->dims[1] * input->dims[2] * input->dims[3]; int group_size = input_chan * input->dims[2] * input->dims[3]; void* input_base = (void*)((uint8_t*)input->data + (n * image_size + group * group_size) * input->elem_size); void* im2col_buf = (void*)priv_info->im2col_buffer; if (input->data_type == TENGINE_DT_FP32) { im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3], param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w); } else if (input->data_type == TENGINE_DT_UINT8) { im2col_uint8(input_base, 
im2col_buf, input, output, param); } else if (input->data_type == TENGINE_DT_INT8) { im2col_int8(input_base, im2col_buf, input, output, param); } else { TLOG_ERR("Input data type %d not to be supported.\n", input->data_type); } } void input_pack4_fp32(int K, int N, float* pB, float* pB_t, int num_thread) { int nn_size = N >> 3; int remian_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const float* img = pB + i; float* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { #if __AVX__ _mm256_storeu_ps(tmp, _mm256_loadu_ps(img)); #else tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; #endif // __SSE__ tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remian_size_start; i < N; i++) { const float* img = pB + i; float* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_fp(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; float* output4 = pC + (i + 4) * N; float* output5 = pC + (i + 5) * N; float* output6 = pC + (i + 6) * N; float* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); __m256 _sum4 = _mm256_set1_ps(0.0); __m256 _sum5 = _mm256_set1_ps(0.0); __m256 _sum6 = _mm256_set1_ps(0.0); __m256 _sum7 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70 va += 8; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = 
_mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41 _sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51 _sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61 _sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71 va += 8; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42 _sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52 _sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62 _sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72 va += 8; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 _va0 = _mm256_broadcast_ss(va + 4); _va1 = _mm256_broadcast_ss(va + 5); _va2 = _mm256_broadcast_ss(va + 6); _va3 = _mm256_broadcast_ss(va + 7); _sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43 _sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53 _sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63 _sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73 va += 8; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _va4 = _mm256_broadcast_ss(va + 4); __m256 _va5 = _mm256_broadcast_ss(va + 5); __m256 _va6 = _mm256_broadcast_ss(va + 6); __m256 _va7 = _mm256_broadcast_ss(va + 7); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 _sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40 _sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50 _sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60 _sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70 va += 8; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); _mm256_storeu_ps(output4, _sum4); _mm256_storeu_ps(output5, _sum5); _mm256_storeu_ps(output6, _sum6); 
_mm256_storeu_ps(output7, _sum7); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; float sum4[8] = {0}; float sum5[8] = {0}; float sum6[8] = {0}; float sum7[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m256 _sum0_7 = _mm256_set1_ps(0.0); __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = _mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _vb1 = _mm256_broadcast_ss(vb + 1); __m256 _vb2 = _mm256_broadcast_ss(vb + 2); __m256 _vb3 = _mm256_broadcast_ss(vb + 3); __m256 _va0 = _mm256_loadu_ps(va); __m256 _va1 = _mm256_loadu_ps(va + 8); __m256 _va2 = _mm256_loadu_ps(va + 16); __m256 _va3 = _mm256_loadu_ps(va + 24); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00 _sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10 _sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20 _sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30 va += 32; vb += 4; } _sum0 = _mm256_add_ps(_sum0, _sum1); _sum2 = _mm256_add_ps(_sum2, _sum3); _sum0_7 = _mm256_add_ps(_sum0_7, _sum0); _sum0_7 = _mm256_add_ps(_sum0_7, _sum2); for (; k < K; k++) { __m256 _vb0 = _mm256_broadcast_ss(vb); __m256 _va = _mm256_loadu_ps(va); _sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00 va += 8; vb += 1; } float output_sum0_7[8] = {0.f}; _mm256_storeu_ps(output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; float sum4 = 0; float sum5 = 0; float sum6 = 0; float sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif // __AVX__ output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; float* output0 = pC + ( i )*N; float* output1 = pC + (i + 1) * N; float* output2 = pC + (i + 2) * N; float* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); __m256 _sum1 = 
_mm256_set1_ps(0.0); __m256 _sum2 = _mm256_set1_ps(0.0); __m256 _sum3 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; // k1 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01 _sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11 _sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21 _sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31 va += 4; // k2 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02 _sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12 _sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22 _sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32 va += 4; // k3 _va0 = _mm256_broadcast_ss(va); _va1 = _mm256_broadcast_ss(va + 1); _va2 = _mm256_broadcast_ss(va + 2); _va3 = _mm256_broadcast_ss(va + 3); _sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03 _sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13 _sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23 _sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10 _sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20 _sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30 va += 4; vb += 8; } _mm256_storeu_ps(output0, _sum0); _mm256_storeu_ps(output1, _sum1); _mm256_storeu_ps(output2, _sum2); _mm256_storeu_ps(output3, _sum3); #else float sum0[8] = {0}; float sum1[8] = {0}; float sum2[8] = {0}; float sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif // __AVX__ output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if __AVX__ __m128 _sum0_3 = _mm_set1_ps(0.0); __m128 _sum0 = _mm_set1_ps(0.0); __m128 _sum1 = _mm_set1_ps(0.0); __m128 _sum2 = _mm_set1_ps(0.0); __m128 _sum3 = _mm_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { 
__m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _vb1 = _mm_set1_ps(vb[1]); __m128 _vb2 = _mm_set1_ps(vb[2]); __m128 _vb3 = _mm_set1_ps(vb[3]); __m128 _va0 = _mm_loadu_ps(va); __m128 _va1 = _mm_loadu_ps(va + 4); __m128 _va2 = _mm_loadu_ps(va + 8); __m128 _va3 = _mm_loadu_ps(va + 12); _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00 _sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10 _sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20 _sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30 va += 16; vb += 4; } _sum0 = _mm_add_ps(_sum0, _sum1); _sum2 = _mm_add_ps(_sum2, _sum3); _sum0_3 = _mm_add_ps(_sum0_3, _sum0); _sum0_3 = _mm_add_ps(_sum0_3, _sum2); for (; k < K; k++) { __m128 _vb0 = _mm_set1_ps(vb[0]); __m128 _va = _mm_loadu_ps(va); _sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00 va += 4; vb += 1; } float output_sum0_3[4] = {0.f}; _mm_storeu_ps(output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else float sum0 = 0; float sum1 = 0; float sum2 = 0; float sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif // __AVX__ output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { float* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8) * 8 * K; #if __AVX__ __m256 _sum0 = _mm256_set1_ps(0.0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _va1 = _mm256_broadcast_ss(va + 1); __m256 _va2 = _mm256_broadcast_ss(va + 2); __m256 _va3 = _mm256_broadcast_ss(va + 3); __m256 _vb0 = _mm256_loadu_ps(vb); __m256 _vb1 = _mm256_loadu_ps(vb + 8); __m256 _vb2 = _mm256_loadu_ps(vb + 16); __m256 _vb3 = _mm256_loadu_ps(vb + 24); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 _sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01 _sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02 _sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03 va += 4; vb += 32; } for (; k < K; k++) { // k0 __m256 _va0 = _mm256_broadcast_ss(va); __m256 _vb0 = _mm256_loadu_ps(vb); _sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00 va += 1; vb += 8; } _mm256_storeu_ps(output, _sum0); #else float sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif // __AVX__ output += 8; } for (; j < N; j++) { float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; float* vb = pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; #if __AVX__ __m128 _sum0 = _mm_set1_ps(0.f); for (; k + 3 < K; k += 4) { __m128 _p0 = _mm_loadu_ps(vb); __m128 _k0 = _mm_loadu_ps(va); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0)); va += 4; vb += 4; } #ifdef _WIN32 float sum0 = _sum0.m128_f32[0] + _sum0.m128_f32[1] + _sum0.m128_f32[2] + _sum0.m128_f32[3]; #else float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3]; #endif #else float sum0 = 0.f; #endif // __AVX__ for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } void input_pack4_int8(int K, int N, 
int8_t* pB, int8_t* pB_t, int num_thread) { int nn_size = N >> 3; int remain_size_start = nn_size << 3; // [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....] #pragma omp parallel for num_threads(num_thread) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 8; const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp[1] = img[1]; tmp[2] = img[2]; tmp[3] = img[3]; tmp[4] = img[4]; tmp[5] = img[5]; tmp[6] = img[6]; tmp[7] = img[7]; tmp += 8; img += N; } } // [ch00, ch01, ch02, ch03 ....] #pragma omp parallel for num_threads(num_thread) for (int i = remain_size_start; i < N; i++) { const int8_t* img = pB + i; int8_t* tmp = pB_t + (i / 8 + i % 8) * 8 * K; for (int j = 0; j < K; j++) { tmp[0] = img[0]; tmp += 1; img += N; } } } static void sgemm_i8(int M, int N, int K, int8_t* pA_t, int8_t* pB_t, int32_t* pC, int num_thread) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = M >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(num_thread) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 8; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int32_t* output4 = pC + (i + 4) * N; int32_t* output5 = pC + (i + 5) * N; int32_t* output6 = pC + (i + 6) * N; int32_t* output7 = pC + (i + 7) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); __m256i _sum4 = _mm256_set1_epi32(0); __m256i _sum5 = _mm256_set1_epi32(0); __m256i _sum6 = _mm256_set1_epi32(0); __m256i _sum7 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum7); va += 8; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2
= _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum7); va += 8; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum7); va += 8; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); _va0 = _mm256_set1_epi32(*(va + 4)); _va1 = _mm256_set1_epi32(*(va + 5)); _va2 = _mm256_set1_epi32(*(va + 6)); _va3 = _mm256_set1_epi32(*(va + 7)); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum7); va += 8; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _va4 = _mm256_set1_epi32(*(va + 4)); __m256i _va5 = _mm256_set1_epi32(*(va + 5)); __m256i _va6 = _mm256_set1_epi32(*(va + 6)); __m256i _va7 = _mm256_set1_epi32(*(va + 7)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); _sum4 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va4), _sum4); _sum5 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va5), _sum5); _sum6 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va6), _sum6); _sum7 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va7), _sum7); va += 8; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); _mm256_storeu_si256((__m256i* )output4, _sum4); _mm256_storeu_si256((__m256i* )output5, _sum5); _mm256_storeu_si256((__m256i* )output6, _sum6); _mm256_storeu_si256((__m256i* )output7, _sum7); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; int32_t sum4[8] = {0}; int32_t sum5[8] = {0}; int32_t sum6[8] = {0}; int32_t sum7[8] = 
{0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; sum4[n] += va[4] * vb[n]; sum5[n] += va[5] * vb[n]; sum6[n] += va[6] * vb[n]; sum7[n] += va[7] * vb[n]; } va += 8; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; output4[n] = sum4[n]; output5[n] = sum5[n]; output6[n] = sum6[n]; output7[n] = sum7[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; output4 += 8; output5 += 8; output6 += 8; output7 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_7 = _mm256_set1_epi32(0); __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 16))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va += 32; vb += 4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum0); _sum0_7 = _mm256_add_epi32(_sum0_7, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_7 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_7); va += 8; vb += 1; } int32_t output_sum0_7[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_7, _sum0_7); output0[0] = output_sum0_7[0]; output1[0] = output_sum0_7[1]; output2[0] = output_sum0_7[2]; output3[0] = output_sum0_7[3]; output4[0] = output_sum0_7[4]; output5[0] = output_sum0_7[5]; output6[0] = output_sum0_7[6]; output7[0] = output_sum0_7[7]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; int32_t sum4 = 0; int32_t sum5 = 0; int32_t sum6 = 0; int32_t sum7 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; sum4 += va[4] * vb[0]; sum5 += va[5] * vb[0]; sum6 += va[6] * vb[0]; sum7 += va[7] * vb[0]; va += 8; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output4[0] = sum4; output5[0] = sum5; output6[0] = sum6; output7[0] = sum7; #endif output0++; output1++; output2++; output3++; output4++; output5++; output6++; output7++; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int i = remain_outch_start + pp * 4; int32_t* output0 = pC + ( i )*N; int32_t* output1 = pC + (i + 1) * N; int32_t* output2 = pC + (i + 2) * N; int32_t* output3 = pC + (i + 3) * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); __m256i _sum1 = 
_mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { // k0 __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; // k1 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va3), _sum3); va += 4; // k2 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va3), _sum3); va += 4; // k3 _va0 = _mm256_set1_epi32(*va); _va1 = _mm256_set1_epi32(*(va + 1)); _va2 = _mm256_set1_epi32(*(va + 2)); _va3 = _mm256_set1_epi32(*(va + 3)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum3); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va3), _sum3); va += 4; vb += 8; } _mm256_storeu_si256((__m256i* )output0, _sum0); _mm256_storeu_si256((__m256i* )output1, _sum1); _mm256_storeu_si256((__m256i* )output2, _sum2); _mm256_storeu_si256((__m256i* )output3, _sum3); #else int32_t sum0[8] = {0}; int32_t sum1[8] = {0}; int32_t sum2[8] = {0}; int32_t sum3[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum0[n] += va[0] * vb[n]; sum1[n] += va[1] * vb[n]; sum2[n] += va[2] * vb[n]; sum3[n] += va[3] * vb[n]; } va += 4; vb += 8; } for (int n = 0; n < 8; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } #endif output0 += 8; output1 += 8; output2 += 8; output3 += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K; int8_t* vb = pB_t + (j / 8 + j % 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0_3 = _mm256_set1_epi32(0); __m256i _sum0 =
_mm256_set1_epi32(0); __m256i _sum1 = _mm256_set1_epi32(0); __m256i _sum2 = _mm256_set1_epi32(0); __m256i _sum3 = _mm256_set1_epi32(0); int k=0; for (; k + 3 < K; k = k + 4) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _vb1 = _mm256_set1_epi32(*(vb + 1)); __m256i _vb2 = _mm256_set1_epi32(*(vb + 2)); __m256i _vb3 = _mm256_set1_epi32(*(vb + 3)); __m256i _va0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); __m256i _va1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 4))); __m256i _va2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 8))); __m256i _va3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(va + 12))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_va0, _vb0), _sum0); _sum1 = _mm256_add_epi32(_mm256_mullo_epi32(_va1, _vb1), _sum1); _sum2 = _mm256_add_epi32(_mm256_mullo_epi32(_va2, _vb2), _sum2); _sum3 = _mm256_add_epi32(_mm256_mullo_epi32(_va3, _vb3), _sum3); va+=16; vb+=4; } _sum0 = _mm256_add_epi32(_sum0, _sum1); _sum2 = _mm256_add_epi32(_sum2, _sum3); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum0); _sum0_3 = _mm256_add_epi32(_sum0_3, _sum2); for (; k < K; k++) { __m256i _vb0 = _mm256_set1_epi32(*vb); __m256i _va = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)va)); _sum0_3 = _mm256_add_epi32(_mm256_mullo_epi32(_va, _vb0), _sum0_3); va += 4; vb += 1; } // store all 8 lanes, only the first 4 values are used int32_t output_sum0_3[8] = {0}; _mm256_storeu_si256((__m256i* )output_sum0_3, _sum0_3); output0[0] = output_sum0_3[0]; output1[0] = output_sum0_3[1]; output2[0] = output_sum0_3[2]; output3[0] = output_sum0_3[3]; #else int32_t sum0 = 0; int32_t sum1 = 0; int32_t sum2 = 0; int32_t sum3 = 0; for (int k = 0; k < K; k++) { sum0 += va[0] * vb[0]; sum1 += va[1] * vb[0]; sum2 += va[2] * vb[0]; sum3 += va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; #endif output0++; output1++; output2++; output3++; } } remain_outch_start += nn_outch << 2; // output ch0 for (int i = remain_outch_start; i < M; i++) { int32_t* output = pC + i * N; int j = 0; for (; j + 7 < N; j += 8) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb = pB_t + (j / 8) * 8 * K; #if 0 //__AVX__ __m256i _sum0 = _mm256_set1_epi32(0); int k = 0; for (; k + 3 < K; k = k + 4) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _va1 = _mm256_set1_epi32(*(va + 1)); __m256i _va2 = _mm256_set1_epi32(*(va + 2)); __m256i _va3 = _mm256_set1_epi32(*(va + 3)); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); __m256i _vb1 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 8))); __m256i _vb2 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 16))); __m256i _vb3 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)(vb + 24))); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb1, _va1), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb2, _va2), _sum0); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb3, _va3), _sum0); va += 4; vb += 32; } for (; k < K; k++) { __m256i _va0 = _mm256_set1_epi32(*va); __m256i _vb0 = _mm256_cvtepi8_epi32(_mm_loadu_si128((__m128i*)vb)); _sum0 = _mm256_add_epi32(_mm256_mullo_epi32(_vb0, _va0), _sum0); va += 1; vb += 8; } _mm256_storeu_si256((__m256i* )output, _sum0); #else int32_t sum[8] = {0}; for (int k = 0; k < K; k++) { for (int n = 0; n < 8; n++) { sum[n] += va[0] * vb[n]; } va += 1; vb += 8; } for (int n = 0; n < 8; n++) { output[n] = sum[n]; } #endif output += 8; } for (; j < N; j++) { int8_t* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K; int8_t* vb =
pB_t + (j / 8 + j % 8) * 8 * K; int k = 0; int32_t sum0 = 0; for (; k < K; k++) { sum0 += va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum0; output++; } } } static void sgemm_fp32(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; float* bias_fp32 = NULL; if (bias) bias_fp32 = ( float* )bias->data + outchan_g * group; float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = output_fp32; sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); // process bias if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_fp32[output_off] += bias_fp32[i]; } } } // process activation relu if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; } } } // process activation relu6 if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_fp32[output_off] < 0) output_fp32[output_off] = 0; if (output_fp32[output_off] > 6) output_fp32[output_off] = 6; } } } } static void sgemm_uint8(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4; uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int* bias_int32 = NULL; float bias_scale = 0.f; if (bias) { bias_int32 = ( int* )bias->data + outchan_g * group; bias_scale = input->scale * filter->scale; } float* filter_sgemm = interleave_fp32; float* input_sgemm_pack4 = im2col_pack4_fp32; float* output_sgemm = (float*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(float)); sgemm_fp(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread); /* process bias */ if (bias) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; output_sgemm[output_off] += (float )bias_int32[i] * bias_scale; } } } /* process activation relu */ if (param->activation == 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++)
{ int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm[output_off] < 0) output_sgemm[output_off] = 0; if (output_sgemm[output_off] > 6) output_sgemm[output_off] = 6; } } } /* quant from fp32 to uint8 */ for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point); if (udata > 255) udata = 255; else if (udata < 0) udata = 0; output_uint8[output_off] = udata; } } sys_free(output_sgemm); } static void sgemm_int8(struct tensor* input, struct tensor* filter, struct tensor* bias, struct tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n, int group, int num_thread) { int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group; int outchan_g = param->output_channel / param->group; int out_h = output->dims[2]; int out_w = output->dims[3]; int out_image_size = output->dims[1] * output->dims[2] * output->dims[3]; int8_t* interleave_int8 = ( int8_t* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size; int8_t* im2col_pack4_int8 = priv_info->im2col_buffer_pack4; int8_t * output_int8 = ( int8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w; int32_t * bias_int32 = NULL; if (bias) bias_int32 = ( int* )bias->data + outchan_g * group; float input_scale = input->scale; float* kernel_scales = filter->scale_list; float output_scale = output->scale; int8_t* filter_sgemm = interleave_int8; int8_t* input_sgemm_pack4 = im2col_pack4_int8; int32_t* output_sgemm_int32 = (int32_t*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(int32_t)); float* output_sgemm_fp32 = (float*)sys_malloc((unsigned long)outchan_g * out_h * out_w * sizeof(float)); sgemm_i8(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm_int32, num_thread); /* process bias and dequant output from int32 to fp32 */ #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (bias) output_sgemm_fp32[output_off] = (float )(output_sgemm_int32[output_off] + bias_int32[i]) * input_scale * kernel_scales[i]; else output_sgemm_fp32[output_off] = (float )output_sgemm_int32[output_off] * input_scale * kernel_scales[i]; } } /* process activation relu */ if (param->activation == 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; } } } /* process activation relu6 */ if (param->activation > 0) { #pragma omp parallel for num_threads(num_thread) for (int i = 0; i < outchan_g; i++) { for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; if (output_sgemm_fp32[output_off] < 0) output_sgemm_fp32[output_off] = 0; if (output_sgemm_fp32[output_off] > 6) output_sgemm_fp32[output_off] = 6; } } } /* quant from fp32 to int8 */ for (int i = 0; i < outchan_g; i++) { #pragma omp parallel for num_threads(num_thread) for (int j = 0; j < out_h * out_w; j++) { int output_off = i * (out_h * out_w) + j; 
int32_t data_i32 = ( int32_t )(round(output_sgemm_fp32[output_off] / output_scale)); if (data_i32 > 127) data_i32 = 127; else if (data_i32 < -127) data_i32 = -127; output_int8[output_off] = (int8_t)data_i32; } } sys_free(output_sgemm_int32); sys_free(output_sgemm_fp32); } /* check whether the conv should use winograd */ static int winograd_support(struct conv_param* param, int in_h, int in_w) { int kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int input_chan = param->input_channel; int output_chan = param->output_channel; int group = param->group; if (in_h <= 10 && in_w <= 10) return 0; if (group != 1 || kernel_h != 3 || kernel_w != 3 || stride_h != 1 || stride_w != 1 || dilation_h != 1 || dilation_w != 1 || input_chan < 16 || output_chan < 16 || output_chan % 16) return 0; return 1; } int conv_hcl_get_shared_mem_size(struct tensor* input, struct tensor* output, struct conv_param* param) { int group = param->group; int input_chan = param->input_channel / group; int kernel_size = input_chan * param->kernel_h * param->kernel_w; int output_xy = output->dims[2] * output->dims[3]; int elem_size = input->elem_size; // simulate uint8 inference with fp32 if (input->data_type == TENGINE_DT_UINT8) elem_size = 4; return elem_size * output_xy * kernel_size; } int conv_hcl_get_shared_pack4_mem_size(struct tensor* filter, struct tensor* output, struct conv_param* param) { int K = filter->elem_num / filter->dims[0]; int N = output->dims[2] * output->dims[3]; int elem_size = filter->elem_size; // simulate uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; return (8 * K * (N / 8 + N % 8)) * elem_size; } int conv_hcl_get_interleave_pack4_size(int M, int K, struct tensor* filter) { int elem_size = filter->elem_size; // simulate uint8 inference with fp32 if (filter->data_type == TENGINE_DT_UINT8) elem_size = 4; int size = 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size; return size; } void conv_hcl_interleave_pack4_fp32(int M, int K, struct conv_priv_info* priv_info) { float* pA = ( float* )priv_info->interleave_buffer; float* pA_t = ( float* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; const float* k4 = pA + (p + 4) * K; const float* k5 = pA + (p + 5) * K; const float* k6 = pA + (p + 6) * K; const float* k7 = pA + (p + 7) * K; float* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const float* k0 = pA + (p + 0) * K; const float* k1 = pA + (p + 1) * K; const float* k2 = pA + (p + 2) * K; const float* k3 = pA + (p + 3) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const float* k0 =
pA + (p + 0) * K; float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } void conv_hcl_interleave_pack4_int8(int M, int K, struct conv_priv_info* priv_info) { int8_t* pA = ( int8_t * )priv_info->interleave_buffer; int8_t* pA_t = ( int8_t* )priv_info->interleave_buffer_pack4; int nn_outch = M >> 3; int remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; const int8_t* k4 = pA + (p + 4) * K; const int8_t* k5 = pA + (p + 5) * K; const int8_t* k6 = pA + (p + 6) * K; const int8_t* k7 = pA + (p + 7) * K; int8_t* ktmp = pA_t + (p / 8) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp[4] = k4[0]; ktmp[5] = k5[0]; ktmp[6] = k6[0]; ktmp[7] = k7[0]; ktmp += 8; k0 += 1; k1 += 1; k2 += 1; k3 += 1; k4 += 1; k5 += 1; k6 += 1; k7 += 1; } } nn_outch = (M - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; const int8_t* k0 = pA + (p + 0) * K; const int8_t* k1 = pA + (p + 1) * K; const int8_t* k2 = pA + (p + 2) * K; const int8_t* k3 = pA + (p + 3) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < M; p++) { const int8_t* k0 = pA + (p + 0) * K; int8_t* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K; for (int q = 0; q < K; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } int conv_hcl_prerun(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; /* check the winograd implementation, only for conv3x3s1 */ if (input_tensor->data_type == TENGINE_DT_FP32) { priv_info->winograd = winograd_support(param, in_h, in_w); if (priv_info->winograd) { return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param); } } if (!priv_info->external_im2col_mem) { int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; } if (!priv_info->external_im2col_pack4_mem) { int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param); void* mem = sys_malloc(mem_size); priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; } if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (input_tensor->data_type == TENGINE_DT_UINT8) interleave_uint8(filter_tensor, priv_info); else interleave(filter_tensor, priv_info); if (priv_info->external_interleave_pack4_mem) { int M = filter_tensor->dims[0]; int K = filter_tensor->elem_num / filter_tensor->dims[0]; int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor); void* mem = sys_malloc(mem_size); priv_info->interleave_buffer_pack4 = mem; priv_info->interleave_buffer_pack4_size = mem_size; if (input_tensor->data_type == TENGINE_DT_FP32 || input_tensor->data_type ==
TENGINE_DT_UINT8) conv_hcl_interleave_pack4_fp32(M, K, priv_info); else conv_hcl_interleave_pack4_int8(M, K, priv_info); if (!priv_info->external_interleave_mem && priv_info->interleave_buffer) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } } else { priv_info->interleave_buffer_pack4 = priv_info->interleave_buffer; priv_info->interleave_buffer_pack4_size = priv_info->interleave_buffer_size; } return 0; } int conv_hcl_postrun(struct conv_priv_info* priv_info) { if (priv_info->winograd) { return wino_conv_hcl_postrun(priv_info); } if (priv_info->external_interleave_pack4_mem && !priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL) { sys_free(priv_info->im2col_buffer); priv_info->im2col_buffer = NULL; } if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL) { sys_free(priv_info->im2col_buffer_pack4); priv_info->im2col_buffer_pack4 = NULL; } if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL) { sys_free(priv_info->interleave_buffer_pack4); priv_info->interleave_buffer_pack4 = NULL; } return 0; } int conv_hcl_run(struct tensor* input_tensor, struct tensor* filter_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { int group = param->group; int type = input_tensor->data_type; if (priv_info->winograd) { return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread, cpu_affinity); } for (int i = 0; i < input_tensor->dims[0]; i++) // batch size { for (int j = 0; j < group; j++) { im2col_ir(input_tensor, output_tensor, priv_info, param, i, j); int K = filter_tensor->elem_num / filter_tensor->dims[0]; int N = output_tensor->dims[2] * output_tensor->dims[3]; void* im2col_buffer = priv_info->im2col_buffer; if (priv_info->external_interleave_pack4_mem) { if (type == TENGINE_DT_FP32 || type == TENGINE_DT_UINT8) input_pack4_fp32(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); else input_pack4_int8(K, N, im2col_buffer, priv_info->im2col_buffer_pack4, num_thread); } else { priv_info->im2col_buffer_pack4 = im2col_buffer; } if (type == TENGINE_DT_FP32) sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_UINT8) sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else if (type == TENGINE_DT_INT8) sgemm_int8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread); else { TLOG_ERR("Input data type %d not supported.\n", input_tensor->data_type); return -1; } } } return 0; } int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_mem = 1; priv_info->im2col_buffer = mem; priv_info->im2col_buffer_size = mem_size; return 0; } int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size) { priv_info->external_im2col_pack4_mem = 1; priv_info->im2col_buffer_pack4 = mem; priv_info->im2col_buffer_pack4_size = mem_size; return 0; }
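/*
 * Illustrative self-check (a sketch added for clarity, not part of the original
 * kernel sources): it verifies the pB_t layout produced by input_pack4_fp32()
 * against the indexing that sgemm_fp() assumes, i.e. each full 8-column block
 * stored as K rows of 8 contiguous values at offset (j / 8) * 8 * K, and each
 * tail column stored as K contiguous values at offset (j / 8 + j % 8) * 8 * K.
 * The guard macro CONV_PACK4_LAYOUT_CHECK is made up for this sketch.
 */
#ifdef CONV_PACK4_LAYOUT_CHECK
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
    const int K = 5, N = 11; /* N not a multiple of 8 exercises the tail path */
    float* B = ( float* )malloc(sizeof(float) * K * N);
    float* B_t = ( float* )calloc(8 * K * (N / 8 + N % 8), sizeof(float));
    for (int k = 0; k < K; k++)
        for (int j = 0; j < N; j++)
            B[k * N + j] = ( float )(k * 100 + j);
    input_pack4_fp32(K, N, B, B_t, 1);
    int mismatch = 0;
    for (int k = 0; k < K; k++)
        for (int j = 0; j < N; j++)
        {
            /* full blocks first, then the per-column tail slots */
            float got = j < ((N >> 3) << 3) ? B_t[(j / 8) * 8 * K + k * 8 + j % 8]
                                            : B_t[(j / 8 + j % 8) * 8 * K + k];
            if (got != B[k * N + j])
                mismatch++;
        }
    printf("pack4 layout mismatches: %d\n", mismatch);
    free(B);
    free(B_t);
    return mismatch != 0;
}
#endif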
gesummv.c
/** * gesummv.c: This file was adapted from PolyBench/GPU 1.0 test * suite to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <stdarg.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #ifdef _OPENMP #include <omp.h> #endif #include "BenchmarksUtil.h" // define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ #ifdef RUN_TEST #define SIZE 1100 #elif RUN_BENCHMARK #define SIZE 9600 #else #define SIZE 1000 #endif #define N SIZE /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i * N + j] * x[j] + tmp[i]; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummv_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { #pragma omp target map(to : A[ : N *N], B[ : N *N], x[ : N], tmp[ : N]) map(tofrom : y[ : N]) device(DEVICE_ID) #pragma omp teams distribute parallel for for (int i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (int j = 0; j < N; j++) { tmp[i] = A[i * N + j] * x[j] + tmp[i]; y[i] = B[i * N + j] * x[j] + y[i]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE)i) / N; for (j = 0; j < N; j++) { A[i * N + j] = ((DATA_TYPE)i * j) / N; B[i * N + j] = ((DATA_TYPE)i * j) / N; } } } int compareResults(DATA_TYPE *y, DATA_TYPE *y_outputFromGpu) { int i, fail; fail = 0; for (i = 0; i < (N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f " "Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); return fail; } int main(int argc, char *argv[]) { double t_start, t_end; int fail = 0; DATA_TYPE *A; DATA_TYPE *B; DATA_TYPE *x; DATA_TYPE *y; DATA_TYPE *y_outputFromGpu; DATA_TYPE *tmp; A = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); B = (DATA_TYPE *)malloc(N * N * sizeof(DATA_TYPE)); x = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE *)calloc(N, sizeof(DATA_TYPE)); tmp = (DATA_TYPE *)malloc(N * sizeof(DATA_TYPE)); fprintf(stdout, "<< Scalar, Vector and Matrix Multiplication >>\n"); init(A, B, x); t_start = rtclock(); gesummv_OMP(A, B, x, y_outputFromGpu, tmp); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); #ifdef RUN_TEST t_start = rtclock(); gesummv(A, B, x, y, tmp); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); fail = compareResults(y, y_outputFromGpu); #endif free(A); free(B); free(x); free(y); free(y_outputFromGpu); free(tmp); return fail; }
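/*
 * Worked micro-example (added for illustration; not part of the original
 * benchmark): gesummv computes y = ALPHA * (A * x) + BETA * (B * x). For a
 * hand-checkable 2x2 case with A = [[1,2],[3,4]], B = I and x = [1,1] we get
 * A*x = [3,7] and B*x = [1,1], so y = [ALPHA*3 + BETA, ALPHA*7 + BETA]. The
 * guard macro GESUMMV_MICRO_CHECK is made up for this sketch.
 */
#ifdef GESUMMV_MICRO_CHECK
#include <assert.h>
static void gesummv_micro_check(void) {
  DATA_TYPE A2[4] = {1, 2, 3, 4}, B2[4] = {1, 0, 0, 1}, x2[2] = {1, 1}, y2[2];
  for (int i = 0; i < 2; i++) {
    DATA_TYPE t = 0, u = 0;
    for (int j = 0; j < 2; j++) {
      t += A2[i * 2 + j] * x2[j]; /* row i of A*x */
      u += B2[i * 2 + j] * x2[j]; /* row i of B*x */
    }
    y2[i] = ALPHA * t + BETA * u;
  }
  /* all intermediate values are small integers, so float arithmetic is exact */
  assert(y2[0] == ALPHA * 3 + BETA);
  assert(y2[1] == ALPHA * 7 + BETA);
}
#endif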
GB_unaryop__ainv_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint32_fp32 // op(A') function: GB_tran__ainv_uint32_fp32 // C type: uint32_t // A type: float // cast: uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32) // unaryop: cij = -aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint32_t z ; GB_CAST_UNSIGNED(z,x,32) ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint32_fp32 ( uint32_t *restrict Cx, const float *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
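//------------------------------------------------------------------------------
// Expansion note (illustrative, not generated code): for a single entry p,
// GB_CAST_OP (p, p) above expands to
//
//      float aij = Ax [p] ;
//      uint32_t x ; GB_CAST_UNSIGNED (x, aij, 32) ;
//      Cx [p] = -x ;
//
// i.e. the float is first cast to uint32_t (GB_CAST_UNSIGNED defines the
// behavior for values that do not fit) and the additive inverse is then taken
// in uint32_t arithmetic, which wraps modulo 2^32.
//------------------------------------------------------------------------------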
pbkdf2-hmac-sha1_fmt_plug.c
/* * This software is Copyright (c) 2013 magnum and it is hereby released to * the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #if FMT_EXTERNS_H extern struct fmt_main fmt_pbkdf2_hmac_sha1; #elif FMT_REGISTERS_H john_register_one(&fmt_pbkdf2_hmac_sha1); #else #include <ctype.h> #include <string.h> #include <assert.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "johnswap.h" #include "base64_convert.h" #include "stdint.h" #include "pbkdf2_hmac_sha1.h" #include "pbkdf2_hmac_common.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 64 #endif #endif #include "memdbg.h" #define FORMAT_LABEL "PBKDF2-HMAC-SHA1" #ifdef SIMD_COEF_32 #define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME #else #define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR #endif #define BINARY_ALIGN sizeof(ARCH_WORD_32) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(ARCH_WORD_32) #ifdef SIMD_COEF_32 #define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1 #else #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #define PAD_SIZE 64 #define PLAINTEXT_LENGTH 125 static struct custom_salt { unsigned int length; unsigned int rounds; unsigned int use_utf8; //unsigned int outlen; /* Not used yet */ unsigned char salt[PBKDF2_32_MAX_SALT_SIZE]; } *cur_salt; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_out)[PBKDF2_SHA1_BINARY_SIZE / sizeof(ARCH_WORD_32)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static void *get_salt(char *ciphertext) { static struct custom_salt cs; char *p; int saltlen; memset(&cs, 0, sizeof(cs)); ciphertext += PBKDF2_SHA1_TAG_LEN; cs.use_utf8 = ciphertext[13] == 'S'; cs.rounds = atou(ciphertext); ciphertext = strchr(ciphertext, '$') + 1; p = strchr(ciphertext, '$'); saltlen = 0; memset(cs.salt, 0, sizeof(cs.salt)); while (ciphertext < p) { /** extract salt **/ cs.salt[saltlen++] = atoi16[ARCH_INDEX(ciphertext[0])] * 16 + atoi16[ARCH_INDEX(ciphertext[1])]; ciphertext += 2; } cs.length = saltlen; return (void *)&cs; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; } static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 #endif for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT) { #ifdef SSE_GROUP_SZ_SHA1 int lens[SSE_GROUP_SZ_SHA1], i; unsigned char *pin[SSE_GROUP_SZ_SHA1]; 
union { ARCH_WORD_32 *pout[SSE_GROUP_SZ_SHA1]; unsigned char *poutc; } x; for (i = 0; i < SSE_GROUP_SZ_SHA1; ++i) { lens[i] = strlen(saved_key[index+i]); pin[i] = (unsigned char*)saved_key[index+i]; x.pout[i] = crypt_out[index+i]; } pbkdf2_sha1_sse((const unsigned char **)pin, lens, cur_salt->salt, cur_salt->length, cur_salt->rounds, &(x.poutc), PBKDF2_SHA1_BINARY_SIZE, 0); #else pbkdf2_sha1((const unsigned char*)(saved_key[index]), strlen(saved_key[index]), cur_salt->salt, cur_salt->length, cur_salt->rounds, (unsigned char*)crypt_out[index], PBKDF2_SHA1_BINARY_SIZE, 0); #endif } return count; } static int cmp_all(void *binary, int count) { int index = 0; #if defined(_OPENMP) || MAX_KEYS_PER_CRYPT > 1 for (; index < count; index++) #endif if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], PBKDF2_SHA1_BINARY_SIZE); } static void set_key(char *key, int index) { int saved_len = strlen(key); if (saved_len > PLAINTEXT_LENGTH) saved_len = PLAINTEXT_LENGTH; memcpy(saved_key[index], key, saved_len); saved_key[index][saved_len] = 0; } static char *get_key(int index) { return saved_key[index]; } /* Check the FULL binary, just for good measure. There is no chance we'll have a false positive here but this function is not performance sensitive. */ static int cmp_exact(char *source, int index) { return pbkdf2_hmac_sha1_cmp_exact(get_key(index), source, cur_salt->salt, cur_salt->length, cur_salt->rounds); } static unsigned int iteration_count(void *salt) { struct custom_salt *my_salt; my_salt = salt; return (unsigned int) my_salt->rounds; } struct fmt_main fmt_pbkdf2_hmac_sha1 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, PBKDF2_SHA1_BINARY_SIZE, PBKDF2_32_BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE, { "iteration count", }, pbkdf2_hmac_sha1_common_tests }, { init, done, fmt_default_reset, pbkdf2_hmac_sha1_prepare, pbkdf2_hmac_sha1_valid, pbkdf2_hmac_sha1_split, pbkdf2_hmac_sha1_binary, get_salt, { iteration_count, }, fmt_default_source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
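/* A minimal standalone sketch of the salt-parsing technique used by
 * get_salt() above: decoding the hex string between two '$' separators into
 * raw bytes. John the Ripper's atoi16[] table and ARCH_INDEX macro are
 * replaced here by a hypothetical hexval() helper, and input validation is
 * assumed to have happened earlier (as valid() does in the real format), so
 * this illustrates the parsing idea rather than the format's actual code. */
#include <stdio.h>
#include <string.h>

static int hexval(char c) {
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;
}

/* Decode the hex salt in "<hexsalt>$..." into out[]; returns salt length. */
static int decode_salt(const char *p, unsigned char *out, int maxlen) {
    const char *end = strchr(p, '$');
    int n = 0;
    while (p + 1 < end && n < maxlen) {
        out[n++] = (unsigned char)(hexval(p[0]) * 16 + hexval(p[1]));
        p += 2;
    }
    return n;
}

int main(void) {
    unsigned char salt[64];
    int n = decode_salt("73616c74$rest", salt, sizeof(salt));
    printf("salt (%d bytes): %.*s\n", n, n, (const char *)salt); /* "salt" */
    return 0;
}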
declare-target-3.c
/* { dg-do compile } */ /* { dg-options "-fopenmp" } */ #pragma omp declare target int a[] = { 1, 2, 3 }; extern int b[]; /* { dg-error "'b' in declare target directive does not have mappable type" } */ extern int c[]; /* { dg-error "'c' in declare target directive does not have mappable type" } */ extern int d[]; /* { dg-error "'d' in declare target directive does not have mappable type" } */ int d[3]; #pragma omp end declare target int c[3]; int e[] = { 1, 2, 3 }; #pragma omp declare target to (e) extern int f[]; #pragma omp declare target to (f) /* { dg-error "'f' does not have a mappable type in 'to' clause" } */ extern int g[]; #pragma omp declare target to (g) /* { dg-error "'g' does not have a mappable type in 'to' clause" } */ int g[3]; extern int h[]; int h[3]; #pragma omp declare target to (h) int i[] = { 1, 2, 3 }; int j[] = { 1, 2, 3 }; extern int k[]; extern int l[]; extern int m[]; extern int n[]; extern int o[]; extern int p[]; int k[3]; int l[3]; int q; void foo (void) { #pragma omp target update to (q) to (i) #pragma omp target map (tofrom: j) ; #pragma omp target update from (q) from (k) #pragma omp target map (to: l) ; #pragma omp target update from (q) from (m) /* { dg-error "'m' does not have a mappable type in 'from' clause" } */ #pragma omp target map (from: n) /* { dg-error "'n' does not have a mappable type in 'map' clause" } */ ; #pragma omp target update to (q) to (o) /* { dg-error "'o' does not have a mappable type in 'to' clause" } */ #pragma omp target map (from: p) /* { dg-error "'p' does not have a mappable type in 'map' clause" } */ ; } int o[3]; int p[3];
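/* For contrast with the error cases the test above exercises, a minimal
 * well-formed sketch of the same directives: the array has a complete (and
 * therefore mappable) type at the point of the "declare target" directive,
 * so both it and the function using it may be referenced inside a target
 * region. Builds with -fopenmp; without device offload it runs on the host. */
#include <stdio.h>

#pragma omp declare target
int table[3] = { 1, 2, 3 };   /* complete type: mappable */
int sum3(void) { return table[0] + table[1] + table[2]; }
#pragma omp end declare target

int main(void) {
    int s = 0;
    #pragma omp target map(from: s)
    s = sum3();
    printf("%d\n", s);  /* 6 */
    return 0;
}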
quick.c
// Quick sort in C
#include <stdio.h>
#include <time.h>
#include <omp.h>

#define getClock() ((double)clock()/CLOCKS_PER_SEC)

double begin, end;

// function to swap elements
void swap(int *a, int *b) {
  int t = *a;
  *a = *b;
  *b = t;
}

// function to find the partition position
int partition(int array[], int low, int high) {
  // select the rightmost element as pivot
  int pivot = array[high];

  // pointer for greater element
  int i = (low - 1);

  // traverse each element of the array and compare it with the pivot.
  // NOTE: this loop must stay sequential. i only advances when a swap
  // happens, so every iteration depends on the ones before it; the
  // "#pragma omp parallel for schedule(static,1)" that used to sit here
  // races on i and on the swaps and produces wrong partitions. A correct
  // task-based OpenMP quicksort is sketched right after this file.
  for (int j = low; j < high; j++) {
    if (array[j] <= pivot) {
      // element smaller than pivot found:
      // swap it with the greater element pointed to by i
      i++;
      swap(&array[i], &array[j]);
    }
  }

  // swap the pivot element with the greater element at i
  swap(&array[i + 1], &array[high]);

  // return the partition point
  return (i + 1);
}

void quickSort(int array[], int low, int high) {
  if (low < high) {
    // find the pivot element such that
    // elements smaller than pivot are on its left
    // and elements greater than pivot are on its right
    int pi = partition(array, low, high);

    // recursive call on the left of pivot
    quickSort(array, low, pi - 1);

    // recursive call on the right of pivot
    quickSort(array, pi + 1, high);
  }
}

// function to print array elements
void printArray(int array[], int size) {
  for (int i = 0; i < size; ++i) {
    printf("%d ", array[i]);
  }
  printf("\n");
}

// main function
int main() {
  FILE *myFile = fopen("dataset.txt", "r");
  if (myFile == NULL) {
    fprintf(stderr, "could not open dataset.txt\n");
    return 1;
  }

  static int data[100000];
  int n = sizeof(data) / sizeof(data[0]);
  for (int i = 0; i < n; i++) {
    if (fscanf(myFile, "%d", &data[i]) != 1) {
      fprintf(stderr, "dataset.txt: expected %d integers\n", n);
      fclose(myFile);
      return 1;
    }
  }
  fclose(myFile);

  // perform quicksort on data
  begin = getClock();
  quickSort(data, 0, n - 1);
  end = getClock();

  printf("Sorted array in ascending order: \n");
  printArray(data, n);
  printf("The quicksort took %f seconds to complete\n", end - begin);
  return 0;
}
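/* The usual way to apply OpenMP to quicksort is not to parallelize the
 * partition loop (which carries a dependence through i) but to run the two
 * recursive calls as tasks. A self-contained sketch of that technique; the
 * cutoff of 1000 is an arbitrary tuning choice, not taken from the code
 * above, and the LCG fill is just to get unsorted demo data. */
#include <stdio.h>
#include <omp.h>

static void swap_int(int *a, int *b) { int t = *a; *a = *b; *b = t; }

static int partition_seq(int a[], int lo, int hi) {
    int pivot = a[hi], i = lo - 1;
    for (int j = lo; j < hi; j++)
        if (a[j] <= pivot) swap_int(&a[++i], &a[j]);
    swap_int(&a[i + 1], &a[hi]);
    return i + 1;
}

static void quicksort_tasks(int a[], int lo, int hi) {
    if (lo >= hi) return;
    int pi = partition_seq(a, lo, hi);
    if (hi - lo < 1000) {              /* small range: recurse serially */
        quicksort_tasks(a, lo, pi - 1);
        quicksort_tasks(a, pi + 1, hi);
    } else {
        #pragma omp task shared(a)
        quicksort_tasks(a, lo, pi - 1);
        #pragma omp task shared(a)
        quicksort_tasks(a, pi + 1, hi);
        #pragma omp taskwait
    }
}

int main(void) {
    enum { N = 100000 };
    static int a[N];
    for (int i = 0; i < N; i++)        /* fill with pseudo-random values */
        a[i] = (int)(((long long)i * 1103515245 + 12345) % N);
    double t0 = omp_get_wtime();
    #pragma omp parallel
    #pragma omp single                 /* one thread seeds the task tree */
    quicksort_tasks(a, 0, N - 1);
    printf("sorted in %f s, a[0]=%d a[N-1]=%d\n",
           omp_get_wtime() - t0, a[0], a[N - 1]);
    return 0;
}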
pi_instrumented.c
/*****************************************************************************\
 *                        ANALYSIS PERFORMANCE TOOLS                         *
 *                                   Extrae                                  *
 *              Instrumentation package for parallel applications            *
 *****************************************************************************
 *     ___     This library is free software; you can redistribute it and/or *
 *    /  __         modify it under the terms of the GNU LGPL as published   *
 *   /  /  _____    by the Free Software Foundation; either version 2.1      *
 *  /  /  /     \   of the License, or (at your option) any later version.   *
 * (  (  ( B S C )                                                           *
 *  \  \  \_____/   This library is distributed in hope that it will be      *
 *   \  \__         useful but WITHOUT ANY WARRANTY; without even the        *
 *    \___          implied warranty of MERCHANTABILITY or FITNESS FOR A     *
 *                  PARTICULAR PURPOSE. See the GNU LGPL for more details.   *
 *                                                                           *
 * You should have received a copy of the GNU Lesser General Public License  *
 * along with this library; if not, write to the Free Software Foundation,   *
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA          *
 * The GNU Lesser General Public License is contained in the file COPYING.   *
 *                                 ---------                                 *
 *   Barcelona Supercomputing Center - Centro Nacional de Supercomputacion   *
\*****************************************************************************/

#include <stdio.h>
#include <omp.h>
#include <math.h>
#include "extrae_user_events.h"

void do_work(void)
{
	int i;
	int n = 1000000;
	double PI25DT = 3.141592653589793238462643;
	double pi, h, area, x;

	h = 1.0 / (double) n;
	area = 0.0;

	#pragma omp parallel for private(x) reduction(+:area)
	for (i = 1; i <= n; i++) {
		x = h * ((double)i - 0.5);
		area += (4.0 / (1.0 + x*x));
	}
	pi = h * area;

	printf("pi (by using #pragma omp parallel for) is approximately %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT));

	#pragma omp parallel
	{
		#pragma omp barrier
		fprintf (stdout, "In a barrier\n");
		#pragma omp barrier
	}

	#pragma omp parallel
	{
		#pragma omp critical (foo)
		printf ("critical foo\n");

		#pragma omp critical (bar)
		printf ("critical bar\n");

		#pragma omp critical (foo)
		printf ("critical foo (again)\n");

		#pragma omp critical
		printf ("critical unnamed\n");
	}

	h = 1.0 / (double) n;
	area = 0.0;

	#pragma omp parallel sections private(i,x) reduction(+:area)
	{
		#pragma omp section
		for (i = 1; i < n/2; i++) {
			x = h * ((double)i - 0.5);
			area += (4.0 / (1.0 + x*x));
		}

		#pragma omp section
		for (i = n/2; i <= n; i++) {
			x = h * ((double)i - 0.5);
			area += (4.0 / (1.0 + x*x));
		}
	}
	pi = h * area;

	printf("pi (by using #pragma omp parallel sections) is approximately %.16f, Error is %.16f\n", pi, fabs(pi - PI25DT));
}

int main(int argc, char **argv)
{
	/* Extrae_init() must be called before any #pragma omp or OMP call */
	Extrae_init();

	do_work();

	/* Extrae_fini() must be the last call */
	Extrae_fini();

	return 0;
}
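/* A small standalone illustration of the named-critical pattern exercised
 * above (no Extrae calls, so it builds with just -fopenmp): two differently
 * named critical sections protect two independent counters. Threads
 * updating one counter never serialize against threads updating the other,
 * whereas a single unnamed critical would serialize everything. */
#include <stdio.h>
#include <omp.h>

int main(void) {
    long foo_count = 0, bar_count = 0;
    #pragma omp parallel
    {
        for (int i = 0; i < 1000; i++) {
            #pragma omp critical (foo)
            foo_count++;
            #pragma omp critical (bar)
            bar_count++;
        }
    }
    printf("foo=%ld bar=%ld (each = 1000 * num_threads)\n",
           foo_count, bar_count);
    return 0;
}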
GB_binop__bxor_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bxor_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__bxor_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__bxor_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__bxor_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bxor_uint64) // A*D function (colscale): GB (_AxD__bxor_uint64) // D*A function (rowscale): GB (_DxB__bxor_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__bxor_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__bxor_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxor_uint64) // C=scalar+B GB (_bind1st__bxor_uint64) // C=scalar+B' GB (_bind1st_tran__bxor_uint64) // C=A+scalar GB (_bind2nd__bxor_uint64) // C=A'+scalar GB (_bind2nd_tran__bxor_uint64) // C type: uint64_t // A type: uint64_t // B,b type: uint64_t // BinaryOp: cij = (aij) ^ (bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x) ^ (y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BXOR || GxB_NO_UINT64 || GxB_NO_BXOR_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bxor_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bxor_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bxor_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bxor_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bxor_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x) ^ (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bxor_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) ^ (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) ^ (aij) ; \ } GrB_Info GB (_bind1st_tran__bxor_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int 
nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) ^ (y) ; \ } GrB_Info GB (_bind2nd_tran__bxor_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
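/* All of the generated kernels above share one loop shape: a flat
 * parallel-for over the entries, where GBB tests presence in an optional
 * bitmap and GBX fetches the value. Below is a standalone sketch of that
 * shape for the bind2nd case (z = aij ^ y), with the GraphBLAS macros
 * replaced by an explicit bitmap test; every name here is local to the
 * sketch, not GraphBLAS API. */
#include <stdio.h>
#include <stdint.h>

static void bind2nd_bxor_u64(uint64_t *Cx, const uint64_t *Ax,
                             const int8_t *Ab,  /* bitmap, or NULL if full */
                             uint64_t y, int64_t anz, int nthreads) {
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++) {
        if (Ab != NULL && !Ab[p]) continue;     /* entry not present */
        Cx[p] = Ax[p] ^ y;
    }
}

int main(void) {
    uint64_t A[4] = { 1, 2, 3, 4 }, C[4] = { 0 };
    int8_t  Ab[4] = { 1, 0, 1, 1 };             /* entry 1 is absent */
    bind2nd_bxor_u64(C, A, Ab, 0xFFu, 4, 2);
    for (int i = 0; i < 4; i++)
        printf("%llu ", (unsigned long long) C[i]);
    printf("\n");                               /* 254 0 252 251 */
    return 0;
}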
GB_binop__bget_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__bget_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_uint16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__bget_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_uint16) // C=scalar+B GB (_bind1st__bget_uint16) // C=scalar+B' GB (_bind1st_tran__bget_uint16) // C=A+scalar GB (_bind2nd__bget_uint16) // C=A'+scalar GB (_bind2nd_tran__bget_uint16) // C type: uint16_t // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ uint16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITGET (x, y, uint16_t, 16) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bget_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *restrict Cx = (uint16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bget_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bget_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bget_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bget_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bget_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bget_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_BITGET (x, bij, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bget_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint16_t *Cx = (uint16_t *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_BITGET (aij, y, uint16_t, 16) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \ } GrB_Info GB (_bind1st_tran__bget_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, 
int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \ } GrB_Info GB (_bind2nd_tran__bget_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
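/* BGET is the non-commutative case in these two generated files, which is
 * why GB_BINOP_FLIP is 1 here and 0 for BXOR: bitget(x,y) and bitget(y,x)
 * differ, so a flipped variant must be handled explicitly. The exact
 * GB_BITGET macro is defined in GB.h and is not reproduced here; this
 * sketch assumes MATLAB-style semantics (bit positions are 1-based, and an
 * out-of-range position yields 0), which is a stated assumption. */
#include <stdio.h>
#include <stdint.h>

/* plausible local equivalent of GB_BITGET (x, k, uint16_t, 16) */
static uint16_t bitget_u16(uint16_t x, uint16_t k) {
    return (k >= 1 && k <= 16) ? (uint16_t)((x >> (k - 1)) & 1) : 0;
}

int main(void) {
    uint16_t x = 0x000A;                 /* binary ...1010 */
    for (uint16_t k = 1; k <= 4; k++)
        printf("bit %u of 0x%04X = %u\n", k, x, bitget_u16(x, k));
    return 0;                            /* prints 0, 1, 0, 1 */
}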
ast-dump-openmp-ordered.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one() { #pragma omp ordered ; } void test_two(int x) { #pragma omp for ordered for (int i = 0; i < x; i++) ; } void test_three(int x) { #pragma omp for ordered(1) for (int i = 0; i < x; i++) { #pragma omp ordered depend(source) } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-ordered.c:3:1, line:6:1> line:3:6 test_one 'void ()' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:17, line:6:1> // CHECK-NEXT: | `-OMPOrderedDirective {{.*}} <line:4:1, col:20> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:3> // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-NullStmt {{.*}} <col:3> // CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-ordered.c:4:1) *const restrict' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:8:1, line:12:1> line:8:6 test_two 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:12:1> // CHECK-NEXT: | `-OMPForDirective {{.*}} <line:9:1, col:24> // CHECK-NEXT: | |-OMPOrderedClause {{.*}} <col:17, col:24> // CHECK-NEXT: | | `-<<<NULL>>> // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:10:3, line:11:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | | |-ForStmt {{.*}} <line:10:3, line:11:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:10:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:11:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-ordered.c:9:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:10:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:14:1, line:19:1> line:14:6 test_three 'void (int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:24, line:19:1> // CHECK-NEXT: `-OMPForDirective {{.*}} <line:15:1, col:27> // CHECK-NEXT: |-OMPOrderedClause {{.*}} <col:17, col:26> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:25> 'int' // CHECK-NEXT: | |-value: Int 1 // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:25> 'int' 1 // CHECK-NEXT: `-CapturedStmt {{.*}} <line:16:3, line:18:3> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: | |-ForStmt {{.*}} <line:16:3, line:18:3> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:16:8, 
col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-CompoundStmt {{.*}} <col:31, line:18:3> // CHECK-NEXT: | | `-OMPOrderedDirective {{.*}} <line:17:1, col:35> openmp_standalone_directive // CHECK-NEXT: | | `-OMPDependClause {{.*}} <col:21, <invalid sloc>> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:15:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-ordered.c:15:1) *const restrict' // CHECK-NEXT: | `-VarDecl {{.*}} <line:16:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: `-DeclRefExpr {{.*}} <col:3> 'int' lvalue ParmVar {{.*}} 'x' 'int'
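/* The depend(source)/depend(sink:...) form exercised by test_three above is
 * the OpenMP 4.5 doacross pattern: iterations may run on different threads,
 * but each one blocks at its "sink" point until the named earlier iteration
 * has passed its "source" point. A minimal runnable sketch (-fopenmp); the
 * sink for the first iteration names a nonexistent iteration and is ignored,
 * as the spec requires: */
#include <stdio.h>

int main(void) {
    int n = 16, a[17];
    a[0] = 1;
    #pragma omp parallel for ordered(1)
    for (int i = 1; i <= n; i++) {
        #pragma omp ordered depend(sink: i - 1)
        a[i] = 2 * a[i - 1];           /* carried dependence, made safe */
        #pragma omp ordered depend(source)
    }
    printf("a[16] = %d\n", a[16]);     /* 65536 */
    return 0;
}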
problem1.c
//------------------------------------------------------------------------------------------------------------------------------ // Samuel Williams // SWWilliams@lbl.gov // Lawrence Berkeley National Lab //------------------------------------------------------------------------------------------------------------------------------ #include <stdint.h> #include "../timer.h" //------------------------------------------------------------------------------------------------------------------------------ void initialize_problem(domain_type *domain, int level, double hLevel, double a, double b){ double NPi = 2.0*M_PI; double Bmin = 1.0; double Bmax = 10.0; double c2 = (Bmax-Bmin)/2; double c1 = (Bmax+Bmin)/2; double c3=10.0; // how sharply (B)eta transitions double c4 = -5.0/0.25; int box; for(box=0;box<domain->subdomains_per_rank;box++){ memset(domain->subdomains[box].levels[level].grids[__u_exact],0,domain->subdomains[box].levels[level].volume*sizeof(double)); memset(domain->subdomains[box].levels[level].grids[__f ],0,domain->subdomains[box].levels[level].volume*sizeof(double)); int i,j,k; #pragma omp parallel for private(k,j,i) collapse(2) for(k=0;k<domain->subdomains[box].levels[level].dim.k;k++){ for(j=0;j<domain->subdomains[box].levels[level].dim.j;j++){ for(i=0;i<domain->subdomains[box].levels[level].dim.i;i++){ double x = hLevel*((double)(i+domain->subdomains[box].levels[level].low.i)+0.5); double y = hLevel*((double)(j+domain->subdomains[box].levels[level].low.j)+0.5); double z = hLevel*((double)(k+domain->subdomains[box].levels[level].low.k)+0.5); int ijk = (i+domain->subdomains[box].levels[level].ghosts)+ domain->subdomains[box].levels[level].pencil*(j+domain->subdomains[box].levels[level].ghosts)+ domain->subdomains[box].levels[level].plane *(k+domain->subdomains[box].levels[level].ghosts); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - double r2 = pow((x-0.50),2) + pow((y-0.50),2) + pow((z-0.50),2); // distance from center squared double r2x = 2.0*(x-0.50); double r2y = 2.0*(y-0.50); double r2z = 2.0*(z-0.50); double r2xx = 2.0; double r2yy = 2.0; double r2zz = 2.0; double r = pow(r2,0.5); double rx = 0.5*r2x*pow(r2,-0.5); double ry = 0.5*r2y*pow(r2,-0.5); double rz = 0.5*r2z*pow(r2,-0.5); double rxx = 0.5*r2xx*pow(r2,-0.5) - 0.25*r2x*r2x*pow(r2,-1.5); double ryy = 0.5*r2yy*pow(r2,-0.5) - 0.25*r2y*r2y*pow(r2,-1.5); double rzz = 0.5*r2zz*pow(r2,-0.5) - 0.25*r2z*r2z*pow(r2,-1.5); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - // beta (B) is a bubble... 
double A = 1.0; double B = c1+c2*tanh( c3*(r-0.25) ); double Bx = c2*c3*rx*(1-pow(tanh( c3*(r-0.25) ),2)); double By = c2*c3*ry*(1-pow(tanh( c3*(r-0.25) ),2)); double Bz = c2*c3*rz*(1-pow(tanh( c3*(r-0.25) ),2)); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - double u = exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*sin(NPi*z); double ux = c4*r2x*u + NPi*exp(c4*r2)*cos(NPi*x)*sin(NPi*y)*sin(NPi*z); double uy = c4*r2y*u + NPi*exp(c4*r2)*sin(NPi*x)*cos(NPi*y)*sin(NPi*z); double uz = c4*r2z*u + NPi*exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*cos(NPi*z); double uxx = c4*r2xx*u + c4*r2x*ux + c4*r2x*NPi*exp(c4*r2)*cos(NPi*x)*sin(NPi*y)*sin(NPi*z) - NPi*NPi*exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*sin(NPi*z); double uyy = c4*r2yy*u + c4*r2y*uy + c4*r2y*NPi*exp(c4*r2)*sin(NPi*x)*cos(NPi*y)*sin(NPi*z) - NPi*NPi*exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*sin(NPi*z); double uzz = c4*r2zz*u + c4*r2z*uz + c4*r2z*NPi*exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*cos(NPi*z) - NPi*NPi*exp(c4*r2)*sin(NPi*x)*sin(NPi*y)*sin(NPi*z); double f = a*A*u - b*( (Bx*ux + By*uy + Bz*uz) + B*(uxx + uyy + uzz) ); //- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - domain->subdomains[box].levels[level].grids[__alpha ][ijk] = A; domain->subdomains[box].levels[level].grids[__beta ][ijk] = B; domain->subdomains[box].levels[level].grids[__u_exact][ijk] = u; domain->subdomains[box].levels[level].grids[__f ][ijk] = f; }}} } double average_value_of_f = mean(domain,level,__f); if(domain->rank==0){printf("\n average value of f = %20.12e\n",average_value_of_f);fflush(stdout);} if(a!=0){ shift_grid(domain,level,__f ,__f ,-average_value_of_f); shift_grid(domain,level,__u_exact,__u_exact,-average_value_of_f/a); } // what if a==0 and average_value_of_f != 0 ??? } //------------------------------------------------------------------------------------------------------------------------------
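/* The right-hand side above is built by the method of manufactured
 * solutions: pick u(x,y,z) analytically, differentiate it by hand, and form
 * f = a*alpha*u - b*(grad(beta).grad(u) + beta*laplacian(u)), so the solver
 * has a known exact answer. A quick guard against algebra mistakes in the
 * hand derivatives is to compare them with centered finite differences;
 * this standalone sketch (compile with -lm) checks ux for the same u: */
#include <stdio.h>
#include <math.h>

static const double NPi = 2.0 * M_PI, c4 = -5.0 / 0.25;

static double u_fn(double x, double y, double z) {
    double r2 = (x-0.5)*(x-0.5) + (y-0.5)*(y-0.5) + (z-0.5)*(z-0.5);
    return exp(c4*r2) * sin(NPi*x) * sin(NPi*y) * sin(NPi*z);
}

/* hand derivative, same algebra as initialize_problem() above */
static double ux_fn(double x, double y, double z) {
    double r2  = (x-0.5)*(x-0.5) + (y-0.5)*(y-0.5) + (z-0.5)*(z-0.5);
    double r2x = 2.0*(x-0.5);
    return c4*r2x*u_fn(x,y,z)
         + NPi*exp(c4*r2)*cos(NPi*x)*sin(NPi*y)*sin(NPi*z);
}

int main(void) {
    double x = 0.3, y = 0.4, z = 0.6, h = 1e-5;
    double fd = (u_fn(x+h,y,z) - u_fn(x-h,y,z)) / (2.0*h);
    printf("ux: analytic=%.10f fd=%.10f diff=%.2e\n",
           ux_fn(x,y,z), fd, fabs(ux_fn(x,y,z) - fd));
    return 0;
}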
GB_unaryop__minv_int8_int32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_int32 // op(A') function: GB_tran__minv_int8_int32 // C type: int8_t // A type: int32_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ int32_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, x) \ int8_t z = (int8_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_int32 ( int8_t *restrict Cx, const int32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_int32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
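/* The kernel above is a typecast-then-apply pattern: GB_CAST_OP expands to
 * "cast aij from the A type to the C type, then apply the unary op", inside
 * a flat parallel loop. A standalone sketch of the same shape; a plain
 * negation stands in for GB_IMINV_SIGNED, whose exact integer-division
 * conventions are defined in GB.h and not reproduced here. */
#include <stdio.h>
#include <stdint.h>

static void apply_cast_neg(int8_t *Cx, const int32_t *Ax,
                           int64_t anz, int nthreads) {
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0; p < anz; p++) {
        int8_t z = (int8_t) Ax[p];   /* cast: int32 -> int8 */
        Cx[p] = (int8_t)(-z);        /* unary op */
    }
}

int main(void) {
    int32_t A[4] = { 1, -2, 300, -4 };  /* 300 wraps on typical int8 casts */
    int8_t  C[4];
    apply_cast_neg(C, A, 4, 2);
    for (int i = 0; i < 4; i++) printf("%d ", C[i]);
    printf("\n");
    return 0;
}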
pi_vec.c
//---------------------------------------------------------------------------
// Name:     pi_vec
//
// Purpose:  This function will numerically compute the integral of
//
//                4/(1+x*x)
//
//           from 0 to 1. The value of this integral is pi.
//
//           In this version of the program, we unroll the loop by four
//           and use SSE 128 bit instructions on values of type packed
//           float (ps). We access SSE using the interface defined in
//           xmmintrin.h and with g++.
//
// Results:  On a Mac OS X MacBook Pro with OS X 10.6.4 and an Intel
//           Core 2 Duo CPU at 2.4 GHz with 2 GB 667 MHz DDRw SDRAM
//           with only a single thread for all CPU results (g++ -m64 -O3)
//
//               pi_reg     4000000   3.141593   0.080886 secs
//               pi_unroll  4000000   3.141593   0.033658 secs
//               pi_sse     4000000   3.141349   0.008818 secs
//
//           But the pi_reg and pi_unroll used double internally.
//           Switching to float:
//
//               pi_reg     4000000   3.125990   0.042709 secs
//               pi_unroll  4000000   3.141348   0.051934 secs
//               pi_sse     4000000   3.141349   0.008776 secs
//
//           That the answers are so unstable for float is not surprising
//           given that an unordered sum is susceptible to round-off errors.
//           This effect is worse as the number of steps increases, so I
//           tested just a couple data points for fewer steps:
//
//               pi_reg     1000000   3.1413     0.0111 secs
//               pi_sse     1000000   3.1415     0.0023 secs
//
//           To compare to the OCL vector results (pi_vocl), we checked
//           the results with NSTEPS = 512*512*16 (and for vocl, ITERS
//           equal to NSTEPS/(512*4) so we have 4 work groups of 512
//           work items)
//
//               pi_vocl    4194304   3.141592   0.010563 secs GPU
//               pi_reg     4194304   3.125117   0.044999 secs
//               pi_unroll  4194304   3.140426   0.054423 secs
//               pi_sse     4194304   3.140426   0.009209 secs
//
// NOTES:    We assume that the number of steps is a multiple of 4.
//
// HISTORY:  Written by Tim Mattson, July 2010
//---------------------------------------------------------------------------
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <immintrin.h>

#define NSTEPS      (512*512*32)
#define NTRIALS     50
#define MAX_THREADS 4
#define DONE        1
#define BIG         10000000.0
#define SMALL       0.00000001

//---------------------------------------------------------------------------
// timer function
//---------------------------------------------------------------------------
#define wtime omp_get_wtime

//---------------------------------------------------------------------------
// The various pi programs
//---------------------------------------------------------------------------
float pi_reg_default(int num_steps)   // literals take default type ..
double { int i, j; float step, x, pi, sum = 0.0; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/((float) num_steps); for (j=0;j<NTRIALS;j++){ start_time = wtime(); sum = 0.0; for (i=1;i<= num_steps; i++){ x = (i-0.5)*step; sum = sum + 4.0/(1.0+x*x); } pi = step * sum; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } //end of loop over trials printf("\n Base default %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,(ave/(double)NTRIALS),min,max); return pi; } float pi_reg_float(int num_steps) // literals explicity made float { int i, j; // double step, x, pi, sum = 0.0; float step, x, pi, sum = 0.0; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0f/((float) num_steps); for (j=0;j<NTRIALS;j++){ start_time = wtime(); sum = 0.0f; for (i=1;i<= num_steps; i++){ x = (i-0.5f)*step; sum = sum + 4.0f/(1.0f+x*x); } pi = step * sum; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } //end of loop over trials printf("\n Base float %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,(ave/(double)NTRIALS),min,max); return pi; } // unroll loop four times float pi_unroll(int num_steps) { int i,j; float step, x0, x1, x2, x3, pi, sum = 0.0; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/(float) num_steps; for(j=0;j<NTRIALS;j++){ start_time = wtime(); sum=0.0; for (i=1;i<= num_steps; i=i+4){ x0 = (i-0.5f)*step; x1 = (i+0.5f)*step; x2 = (i+1.5f)*step; x3 = (i+2.5f)*step; sum = sum + 4.0f*(1.0f/(1.0f+x0*x0) + 1.0f/(1.0f+x1*x1) + 1.0f/(1.0f+x2*x2) + 1.0f/(1.0f+x3*x3)); } pi = step * sum; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } //end of loop over trials printf("\n Unroll 4 %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,ave/NTRIALS,min,max); return pi; } // unroll loop, use SSE for loop body float pi_sse(int num_steps) { int i,j; float step, pi; double start_time, run_time; float scalar_one = 1.0, scalar_zero = 0.0; float ival, scalar_four = 4.0; float vsum[4]; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/(float) num_steps; for (j=0;j<NTRIALS;j++){ start_time = wtime(); __m128 ramp = _mm_setr_ps(0.5, 1.5, 2.5, 3.5); __m128 one = _mm_load1_ps(&scalar_one); __m128 four = _mm_load1_ps(&scalar_four); __m128 vstep = _mm_load1_ps(&step); __m128 sum = _mm_load1_ps(&scalar_zero); __m128 xvec; __m128 denom; __m128 eye; // unroll loop 4 times ... 
assume num_steps%4 = 0 for (i=0;i< num_steps; i=i+4){ ival = (float)i; eye = _mm_load1_ps(&ival); xvec = _mm_mul_ps(_mm_add_ps(eye,ramp),vstep); denom = _mm_add_ps(_mm_mul_ps(xvec,xvec),one); sum = _mm_add_ps(_mm_div_ps(four,denom),sum); } _mm_store_ps(&vsum[0],sum); pi = step * (vsum[0]+vsum[1]+vsum[2]+vsum[3]); run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } printf("\n SSE %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,ave/NTRIALS,min,max); return pi; } // unroll loop, use SSE for loop body and parallel omp float pi_sse_par(int num_steps) { int k,j; float step, local_sum[MAX_THREADS],pi; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/(float) num_steps; for (j=0;j<NTRIALS;j++){ start_time = wtime(); pi = 0.0f; for (k=0; k<MAX_THREADS; k++) local_sum[k] = 0.0; #pragma omp parallel { int i,ID=omp_get_thread_num(); float scalar_one = 1.0, scalar_zero = 0.0; float ival, scalar_four = 4.0; float vsum[4]; __m128 ramp = _mm_setr_ps(0.5, 1.5, 2.5, 3.5); __m128 one = _mm_load1_ps(&scalar_one); __m128 four = _mm_load1_ps(&scalar_four); __m128 vstep = _mm_load1_ps(&step); __m128 sum = _mm_load1_ps(&scalar_zero); __m128 xvec; __m128 denom; __m128 eye; // unroll loop 4 times ... assume num_steps%4 = 0 #pragma omp for for (i=0;i< num_steps; i=i+4){ ival = (float)i; eye = _mm_load1_ps(&ival); xvec = _mm_mul_ps(_mm_add_ps(eye,ramp),vstep); denom = _mm_add_ps(_mm_mul_ps(xvec,xvec),one); sum = _mm_add_ps(_mm_div_ps(four,denom),sum); } _mm_store_ps(&vsum[0],sum); local_sum[ID] = step * (vsum[0]+vsum[1]+vsum[2]+vsum[3]); } for(k=0;k<MAX_THREADS;k++) pi += local_sum[k]; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } printf("\n SSE + OMP par %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,ave/NTRIALS,min,max); return pi; } float pi_omp_simd (int num_steps) { int i, j; // double step, x, pi, sum = 0.0; float step, x, x0, pi, sum = 0.0; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/((float) num_steps); for (j=0;j<NTRIALS;j++){ start_time = wtime(); sum = 0.0f; x0 = -0.5*step; #pragma omp simd private(x) reduction(+:sum) for (i=1;i<= num_steps; i++){ x = x0 + (float)(i)*step; sum = sum + 4.0f/(1.0f+x*x); } pi = step * sum; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } //end of loop over trials printf("\n OMP SIMD %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,(ave/(double)NTRIALS),min,max); return pi; } float pi_omp_par_simd (int num_steps) { int i, j; // double step, x, pi, sum = 0.0; float step, x, x0, pi, sum = 0.0; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/((float) num_steps); for (j=0;j<NTRIALS;j++){ start_time = wtime(); sum = 0.0f; x0 = -0.5f*step; #pragma omp parallel for simd private(x) reduction(+:sum) for (i=1;i<= num_steps; i++){ x = x0 + (float)(i)*step; sum = sum + 4.0f/(1.0f+x*x); } pi = step * sum; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } //end of loop over trials printf("\n OMP par SIMD %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,(ave/(double)NTRIALS),min,max); return pi; } // unroll loop, use AVX for loop body and parallel omp /* <<<< skip this function. 
Not all compilers support AVX >>> float pi_avx(int num_steps) { int k,j; float step, local_sum[MAX_THREADS],pi; double start_time, run_time; double min, max, ave; min = BIG; max = SMALL; ave = 0.0; step = 1.0/(float) num_steps; for (j=0;j<NTRIALS;j++){ start_time = wtime(); pi = 0.0f; for (k=0; k<MAX_THREADS; k++) local_sum[k] = 0.0; #pragma omp parallel { int i,ID=omp_get_thread_num(); float scalar_one = 1.0, scalar_zero = 0.0; float ival, scalar_four = 4.0; // float vsum[8]; void *vsum_space; float *vsum; int r = posix_memalign(&vsum_space, 32, 8*sizeof(float)); vsum = (float *)vsum_space; // float vsum[8] __attribute__((aligned(32))); printf("\n%d vsum address %08x %ld\n",j,(unsigned)&vsum[0],((long)&vsum[0]%32)); __m256 ramp = _mm256_setr_ps(0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5); __m256 one = _mm256_load_ps(&scalar_one); __m256 four = _mm256_load_ps(&scalar_four); __m256 vstep = _mm256_load_ps(&step); __m256 sum = _mm256_load_ps(&scalar_zero); __m256 xvec; __m256 denom; __m256 eye; // unroll loop 8 times ... assume num_steps%8 = 0 #pragma omp for for (i=0;i< num_steps; i=i+8){ ival = (float)i; eye = _mm256_load_ps(&ival); xvec = _mm256_mul_ps(_mm256_add_ps(eye,ramp),vstep); denom = _mm256_add_ps(_mm256_mul_ps(xvec,xvec),one); sum = _mm256_add_ps(_mm256_div_ps(four,denom),sum); } // // This causes a segmentation fault // _mm256_store_ps(&vsum[0],sum); local_sum[ID] = step * (vsum[0]+vsum[1]+vsum[2]+vsum[3]+ vsum[4]+vsum[5]+vsum[6]+vsum[7]); } for(k=0;k<MAX_THREADS;k++) pi += local_sum[k]; run_time = wtime() - start_time; ave+=run_time; if(run_time<min)min=run_time; if(run_time>max)max=run_time; } printf("\n AVX + OMP par %d steps %f: ave=%f min=%f max=%f secs ", num_steps,pi,ave/NTRIALS,min,max); return pi; } <<<< skip pi AVX function >>>>>>> */ int main() { int num_steps=NSTEPS; // turn off dynamic mode ... so the same number of threads // are used for each parallel region omp_set_dynamic(0); float pi_val1 = pi_reg_default (num_steps); float pi_val2 = pi_reg_float (num_steps); float pi_val3 = pi_unroll (num_steps); float pi_val4 = pi_sse (num_steps); float pi_val7 = pi_sse_par (num_steps); float pi_val5 = pi_omp_simd (num_steps); float pi_val6 = pi_omp_par_simd(num_steps); // float pi_val8 = pi_avx (num_steps); printf("\n"); #pragma omp parallel { #pragma omp master printf(" %d threads used when parallel.\n",omp_get_num_threads()); } }
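/* The skipped pi_avx() above segfaults for two reasons: _mm256_load_ps is
 * used on the address of a single float (it reads 8 floats and requires a
 * 32-byte-aligned source; the broadcast it wants is _mm256_set1_ps), and
 * _mm256_store_ps needs a 32-byte-aligned destination. A corrected sketch
 * of the AVX kernel, single-threaded for brevity (compile with -mavx;
 * the aligned attribute below is GCC/Clang syntax): */
#include <stdio.h>
#include <immintrin.h>

float pi_avx_fixed(int num_steps)      /* assumes num_steps % 8 == 0 */
{
    float step = 1.0f / (float) num_steps;
    __m256 ramp  = _mm256_setr_ps(0.5f, 1.5f, 2.5f, 3.5f,
                                  4.5f, 5.5f, 6.5f, 7.5f);
    __m256 one   = _mm256_set1_ps(1.0f);    /* broadcast, not load */
    __m256 four  = _mm256_set1_ps(4.0f);
    __m256 vstep = _mm256_set1_ps(step);
    __m256 sum   = _mm256_setzero_ps();
    for (int i = 0; i < num_steps; i += 8) {
        __m256 eye   = _mm256_set1_ps((float) i);
        __m256 xvec  = _mm256_mul_ps(_mm256_add_ps(eye, ramp), vstep);
        __m256 denom = _mm256_add_ps(_mm256_mul_ps(xvec, xvec), one);
        sum = _mm256_add_ps(_mm256_div_ps(four, denom), sum);
    }
    float vsum[8] __attribute__((aligned(32)));
    _mm256_store_ps(vsum, sum);             /* aligned destination: safe */
    return step * (vsum[0]+vsum[1]+vsum[2]+vsum[3]+
                   vsum[4]+vsum[5]+vsum[6]+vsum[7]);
}

int main(void) {
    printf("pi ~ %f\n", pi_avx_fixed(512*512*16));
    return 0;
}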
common.h
#ifndef SPLITSPECTRUM_COMMON_H
#define SPLITSPECTRUM_COMMON_H

#include <time.h>
#include <sys/time.h>
#include <omp.h>
#include <iostream>
#include <stdexcept>

double getWallTime() {
  struct timeval time;
  if (gettimeofday(&time, NULL)) {
    // gettimeofday failed; report zero rather than garbage
    return 0;
  }
  return (double)time.tv_sec + (double)time.tv_usec * .000001;
}

int numberOfThreads() {
  int nthreads = 0;
#pragma omp parallel
  {
    // Query from the master thread. The original test for thread id 1 left
    // nthreads at 0 whenever only a single thread was available, which
    // wrongly triggered the "not linked" branch below.
    if (omp_get_thread_num() == 0) {
      nthreads = omp_get_num_threads();
    }
  }
  std::cout << "Processing with " << nthreads << " threads \n";
  if (nthreads == 0) {
    std::cout << "Looks like the code was not linked with OpenMP. \n";
    std::cout << "Recompile with the right linker flags. \n";
    // a bare `throw;` outside a catch block calls std::terminate
    throw std::runtime_error("OpenMP support missing");
  }
  if (nthreads == 1) {
    std::cout << "This code has been designed to work with multiple threads. \n";
    std::cout << "Looks like sequential execution due to compilation or environment settings. \n";
    std::cout << "Check your settings for optimal performance. Continuing ... \n";
  }
  return nthreads;
}

#endif //SPLITSPECTRUM_COMMON_H
interpolate_op.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <string> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { template <typename T, size_t D, int MajorType = Eigen::RowMajor, typename IndexType = Eigen::DenseIndex> using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>; using Tensor = framework::Tensor; using DataLayout = framework::DataLayout; inline std::vector<int> get_new_shape( const std::vector<const Tensor*>& list_new_shape_tensor) { // get tensor from std::vector<int> vec_new_shape; for (size_t i = 0; i < list_new_shape_tensor.size(); ++i) { auto tensor = list_new_shape_tensor[i]; PADDLE_ENFORCE_EQ(tensor->dims(), framework::make_ddim({1}), platform::errors::InvalidArgument( "The shape of dimension tensor should be [1]," "but received d%.", tensor->dims())); if (platform::is_gpu_place(tensor->place())) { framework::Tensor temp; paddle::framework::TensorCopySync(*tensor, platform::CPUPlace(), &temp); vec_new_shape.push_back(static_cast<int32_t>(*temp.data<int32_t>())); } else { vec_new_shape.push_back(static_cast<int32_t>(*tensor->data<int32_t>())); } } return vec_new_shape; } template <typename T> inline std::vector<T> get_new_data_from_tensor(const Tensor* new_data_tensor) { std::vector<T> vec_new_data; auto* new_data = new_data_tensor->data<T>(); framework::Tensor cpu_starts_tensor; if (platform::is_gpu_place(new_data_tensor->place())) { paddle::framework::TensorCopySync(*new_data_tensor, platform::CPUPlace(), &cpu_starts_tensor); new_data = cpu_starts_tensor.data<T>(); } vec_new_data = std::vector<T>(new_data, new_data + new_data_tensor->numel()); return vec_new_data; } inline void ExtractNCDWH(const framework::DDim& dims, const DataLayout& data_layout, int* N, int* C, int* D, int* H, int* W) { *N = dims[0]; if (dims.size() == 3) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[2]; *D = 1; *H = 1; *W = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; } else if (dims.size() == 4) { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[3]; *D = 1; *H = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *W = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; } else { *C = data_layout == DataLayout::kNCHW ? dims[1] : dims[4]; *D = data_layout == DataLayout::kNCHW ? dims[2] : dims[1]; *H = data_layout == DataLayout::kNCHW ? dims[3] : dims[2]; *W = data_layout == DataLayout::kNCHW ? 
dims[4] : dims[3]; } } template <typename T> static void NearestNeighborInterpolate(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = input_t(i, j, in_k, in_l); } else { output_t(i, k, l, j) = input_t(i, in_k, in_l, j); } } } } } } template <typename T> static void LinearInterpolation(const Tensor& input, Tensor* output, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 3>::From(input); auto output_t = EigenTensor<T, 3>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(3) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int l = 0; l < out_w; l++) { // linear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vx_w[l]) * vd_e[l] + input_t(i, j, vx_e[l]) * vd_w[l]; output_t(i, j, l) = out_t; } else { out_t = input_t(i, vx_w[l], j) * vd_e[l] + input_t(i, vx_e[l], j) * vd_w[l]; output_t(i, l, j) = out_t; } } } } } template <typename T> static void LinearInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_w, const int in_w, const int n, const int c, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 3>::From(*input_grad); auto output_grad_t = EigenTensor<T, 3>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; // w int x_e = (x_w < (in_w - 1)) ? (x_w + 1) : x_w; // w_id float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; // w1lambda float d_e = 1.f - d_w; // w2lambda for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // linear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, l); input_grad_t(i, j, x_w) += static_cast<T>(grad * d_e); input_grad_t(i, j, x_e) += static_cast<T>(grad * d_w); } else { const T grad = output_grad_t(i, l, j); input_grad_t(i, x_w, j) += static_cast<T>(grad * d_e); input_grad_t(i, x_e, j) += static_cast<T>(grad * d_w); } } } } } template <typename T> static void BilinearInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels for (int k = 0; k < out_h; k++) { // loop for images for (int l = 0; l < out_w; l++) { // bilinear interpolation T out_t; if (data_layout == DataLayout::kNCHW) { out_t = input_t(i, j, vy_n[k], vx_w[l]) * vd_s[k] * vd_e[l] + input_t(i, j, vy_s[k], vx_w[l]) * vd_n[k] * vd_e[l] + input_t(i, j, vy_n[k], vx_e[l]) * vd_s[k] * vd_w[l] + input_t(i, j, vy_s[k], vx_e[l]) * vd_n[k] * vd_w[l]; output_t(i, j, k, l) = out_t; } else { out_t = input_t(i, vy_n[k], vx_w[l], j) * vd_s[k] * vd_e[l] + input_t(i, vy_s[k], vx_w[l], j) * vd_n[k] * vd_e[l] + input_t(i, vy_n[k], vx_e[l], j) * vd_s[k] * vd_w[l] + input_t(i, vy_s[k], vx_e[l], j) * vd_n[k] * vd_w[l]; output_t(i, k, l, j) = out_t; } } } } } } template <typename T> static void TrilinearInterpolation( const Tensor& input, Tensor* output, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const bool align_mode, const DataLayout& data_layout) { auto input_t = EigenTensor<T, 5>::From(input); auto output_t = EigenTensor<T, 5>::From(*output); bool align_flag = (align_mode == 0 && !align_corners); std::vector<int> vt_f, vt_b; std::vector<float> vd_f, vd_b; vt_f.reserve(out_d); vt_b.reserve(out_d); vd_f.reserve(out_d); vd_b.reserve(out_d); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int j = 0; j < out_d; j++) { int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; { vt_f[j] = t_f; vt_b[j] = t_b; vd_f[j] = d_f; vd_b[j] = d_b; } } std::vector<int> vy_n, vy_s; std::vector<float> vd_n, vd_s; vy_n.reserve(out_h); vy_s.reserve(out_h); vd_n.reserve(out_h); vd_s.reserve(out_h); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int k = 0; k < out_h; k++) { int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; { vy_n[k] = y_n; vy_s[k] = y_s; vd_n[k] = d_n; vd_s[k] = d_s; } } std::vector<int> vx_w, vx_e; std::vector<float> vd_w, vd_e; vx_w.reserve(out_w); vx_e.reserve(out_w); vd_w.reserve(out_w); vd_e.reserve(out_w); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int l = 0; l < out_w; l++) { int x_w = (align_mode == 0 && !align_corners) ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; { vx_w[l] = x_w; vx_e[l] = x_e; vd_w[l] = d_w; vd_e[l] = d_e; } } #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(5) #endif for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels for (int j = 0; j < out_d; j++) { // loop for D, H, W for (int k = 0; k < out_h; k++) { for (int l = 0; l < out_w; l++) { // trilinear interpolation if (data_layout == DataLayout::kNCHW) { T out_t = input_t(b, i, vt_f[j], vy_n[k], vx_w[l]) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_n[k], vx_e[l]) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_f[j], vy_s[k], vx_w[l]) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_f[j], vy_s[k], vx_e[l]) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_n[k], vx_w[l]) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_n[k], vx_e[l]) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, i, vt_b[j], vy_s[k], vx_w[l]) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, i, vt_b[j], vy_s[k], vx_e[l]) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, i, j, k, l) = out_t; } else { T out_t = input_t(b, vt_f[j], vy_n[k], vx_w[l], i) * vd_b[j] * vd_s[k] * vd_e[l] + input_t(b, vt_f[j], vy_n[k], vx_e[l], i) * vd_b[j] * vd_s[k] * vd_w[l] + input_t(b, vt_f[j], vy_s[k], vx_w[l], i) * vd_b[j] * vd_n[k] * vd_e[l] + input_t(b, vt_f[j], vy_s[k], vx_e[l], i) * vd_b[j] * vd_n[k] * vd_w[l] + input_t(b, vt_b[j], vy_n[k], vx_w[l], i) * vd_f[j] * vd_s[k] * vd_e[l] + input_t(b, vt_b[j], vy_n[k], vx_e[l], i) * vd_f[j] * vd_s[k] * vd_w[l] + input_t(b, vt_b[j], vy_s[k], vx_w[l], i) * vd_f[j] * vd_n[k] * vd_e[l] + input_t(b, vt_b[j], vy_s[k], vx_e[l], i) * vd_f[j] * vd_n[k] * vd_w[l]; output_t(b, j, k, l, i) = out_t; } } } } } } } template <typename T> HOSTDEVICE inline T cubic_convolution1(T x, T A) { return ((A + 2) * x - (A + 3)) * x * x + 1; } template <typename T> HOSTDEVICE inline T cubic_convolution2(T x, T A) { return ((A * x - 5 * A) * x + 8 * A) * x - 4 * A; } template <typename T> HOSTDEVICE inline void get_cubic_upsample_coefficients(T coeffs[4], T t) { T A = -0.75; T x1 = t; coeffs[0] = cubic_convolution2<T>(x1 + 1.0, A); coeffs[1] = cubic_convolution1<T>(x1, A); // opposite coefficients T x2 = 1.0 - t; coeffs[2] = cubic_convolution1<T>(x2, A); coeffs[3] = cubic_convolution2<T>(x2 + 1.0, A); } template <typename T> static inline T cubic_interp(T x0, T x1, T x2, T x3, T t) { T coeffs[4]; get_cubic_upsample_coefficients<T>(coeffs, t); return x0 * coeffs[0] + x1 * coeffs[1] + x2 * coeffs[2] + x3 * coeffs[3]; } template <typename T> static void BicubicInterpolation(const Tensor& input, Tensor* output, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_t = EigenTensor<T, 4>::From(input); auto output_t = EigenTensor<T, 4>::From(*output); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); const T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); const T x_t = x_n - input_x; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels T coefficients[4]; // interp 4 times in x direction for (int ii = 0; ii < 4; ii++) { int access_y = std::max(std::min(input_y - 1 + ii, in_h - 1), static_cast<int>(0)); int access_x_0 = std::max(std::min(input_x - 1, in_w - 1), static_cast<int>(0)); int access_x_1 = std::max(std::min(input_x + 0, in_w - 1), static_cast<int>(0)); int access_x_2 = std::max(std::min(input_x + 1, in_w - 1), static_cast<int>(0)); int access_x_3 = std::max(std::min(input_x + 2, in_w - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { coefficients[ii] = cubic_interp<T>(input_t(i, j, access_y, access_x_0), input_t(i, j, access_y, access_x_1), input_t(i, j, access_y, access_x_2), input_t(i, j, access_y, access_x_3), x_t); } else { coefficients[ii] = cubic_interp<T>(input_t(i, access_y, access_x_0, j), input_t(i, access_y, access_x_1, j), input_t(i, access_y, access_x_2, j), input_t(i, access_y, access_x_3, j), x_t); } } // interp y direction if (data_layout == DataLayout::kNCHW) { output_t(i, j, k, l) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } else { output_t(i, k, l, j) = cubic_interp<T>(coefficients[0], coefficients[1], coefficients[2], coefficients[3], y_t); } } } } } } template <typename T> static void NearestNeighborInterpolateGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images int in_k = (align_corners) ? static_cast<int>(ratio_h * k + 0.5) : static_cast<int>(ratio_h * k); for (int l = 0; l < out_w; l++) { int in_l = (align_corners) ? static_cast<int>(ratio_w * l + 0.5) : static_cast<int>(ratio_w * l); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels if (data_layout == DataLayout::kNCHW) { input_grad_t(i, j, in_k, in_l) += output_grad_t(i, j, k, l); } else { input_grad_t(i, in_k, in_l, j) += output_grad_t(i, k, l, j); } } } } } } template <typename T> static void BilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int k = 0; k < out_h; k++) { // loop for images int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? 
(x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, y_n, x_w) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, j, y_s, x_w) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, j, y_n, x_e) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, j, y_s, x_e) += static_cast<T>(grad * d_n * d_w); } else { const T grad = output_grad_t(i, k, l, j); input_grad_t(i, y_n, x_w, j) += static_cast<T>(grad * d_s * d_e); input_grad_t(i, y_s, x_w, j) += static_cast<T>(grad * d_n * d_e); input_grad_t(i, y_n, x_e, j) += static_cast<T>(grad * d_s * d_w); input_grad_t(i, y_s, x_e, j) += static_cast<T>(grad * d_n * d_w); } } } } } } template <typename T> static void TrilinearInterpolationGrad( const Tensor& output_grad, Tensor* input_grad, const float ratio_d, const float ratio_h, const float ratio_w, const int in_d, const int in_h, const int in_w, const int n, const int c, const int out_d, const int out_h, const int out_w, const bool align_corners, const int align_mode, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 5>::From(*input_grad); auto output_grad_t = EigenTensor<T, 5>::From(output_grad); bool align_flag = (align_mode == 0 && !align_corners); for (int j = 0; j < out_d; j++) { // loop for D int t_f = align_flag ? static_cast<int>(ratio_d * (j + 0.5) - 0.5) : static_cast<int>(ratio_d * j); t_f = (t_f > 0) ? t_f : 0; int t_b = (t_f + 1) < (in_d - 1) ? (t_f + 1) : (in_d - 1); float idx_src_t = ratio_d * (j + 0.5) - 0.5; idx_src_t = (idx_src_t > 0) ? idx_src_t : 0; float d_f = align_flag ? idx_src_t - t_f : ratio_d * j - t_f; float d_b = 1.f - d_f; for (int k = 0; k < out_h; k++) { // loop for H int y_n = align_flag ? static_cast<int>(ratio_h * (k + 0.5) - 0.5) : static_cast<int>(ratio_h * k); y_n = (y_n > 0) ? y_n : 0; int y_s = (y_n + 1) < (in_h - 1) ? (y_n + 1) : (in_h - 1); float idx_src_y = ratio_h * (k + 0.5) - 0.5; idx_src_y = (idx_src_y > 0) ? idx_src_y : 0; float d_n = align_flag ? idx_src_y - y_n : ratio_h * k - y_n; float d_s = 1.f - d_n; for (int l = 0; l < out_w; l++) { // loop for W int x_w = align_flag ? static_cast<int>(ratio_w * (l + 0.5) - 0.5) : static_cast<int>(ratio_w * l); x_w = (x_w > 0) ? x_w : 0; int x_e = (x_w + 1) < (in_w - 1) ? (x_w + 1) : (in_w - 1); float idx_src_x = ratio_w * (l + 0.5) - 0.5; idx_src_x = (idx_src_x > 0) ? idx_src_x : 0; float d_w = align_flag ? 
idx_src_x - x_w : ratio_w * l - x_w; float d_e = 1.f - d_w; for (int b = 0; b < n; b++) { // loop for batches for (int i = 0; i < c; i++) { // loop for channels // trilinear interpolation grad if (data_layout == DataLayout::kNCHW) { const T grad = output_grad_t(b, i, j, k, l); input_grad_t(b, i, t_f, y_n, x_w) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, i, t_f, y_n, x_e) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, i, t_f, y_s, x_w) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, i, t_f, y_s, x_e) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, i, t_b, y_n, x_w) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, i, t_b, y_n, x_e) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, i, t_b, y_s, x_w) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, i, t_b, y_s, x_e) += static_cast<T>(grad * d_f * d_n * d_w); } else { const T grad = output_grad_t(b, j, k, l, i); input_grad_t(b, t_f, y_n, x_w, i) += static_cast<T>(grad * d_b * d_s * d_e); input_grad_t(b, t_f, y_n, x_e, i) += static_cast<T>(grad * d_b * d_s * d_w); input_grad_t(b, t_f, y_s, x_w, i) += static_cast<T>(grad * d_b * d_n * d_e); input_grad_t(b, t_f, y_s, x_e, i) += static_cast<T>(grad * d_b * d_n * d_w); input_grad_t(b, t_b, y_n, x_w, i) += static_cast<T>(grad * d_f * d_s * d_e); input_grad_t(b, t_b, y_n, x_e, i) += static_cast<T>(grad * d_f * d_s * d_w); input_grad_t(b, t_b, y_s, x_w, i) += static_cast<T>(grad * d_f * d_n * d_e); input_grad_t(b, t_b, y_s, x_e, i) += static_cast<T>(grad * d_f * d_n * d_w); } } } } } } } template <typename T> static void BicubicInterpolationGrad(const Tensor& output_grad, Tensor* input_grad, const float ratio_h, const float ratio_w, const int in_h, const int in_w, const int n, const int c, const int out_h, const int out_w, const bool align_corners, const DataLayout data_layout) { auto input_grad_t = EigenTensor<T, 4>::From(*input_grad); auto output_grad_t = EigenTensor<T, 4>::From(output_grad); for (int k = 0; k < out_h; k++) { // loop for images T y_n = align_corners ? static_cast<T>(ratio_h * k) : static_cast<T>(ratio_h * (k + 0.5) - 0.5); int input_y = floorf(y_n); T y_t = y_n - input_y; for (int l = 0; l < out_w; l++) { T x_n = align_corners ? 
static_cast<T>(ratio_w * l) : static_cast<T>(ratio_w * (l + 0.5) - 0.5); int input_x = floorf(x_n); T x_t = x_n - input_x; T x_coeffs[4]; T y_coeffs[4]; get_cubic_upsample_coefficients<T>(x_coeffs, x_t); get_cubic_upsample_coefficients<T>(y_coeffs, y_t); for (int i = 0; i < n; i++) { // loop for batches for (int j = 0; j < c; j++) { // loop for channels // bicubic interpolation grad for (int ii = 0; ii < 4; ii++) { for (int jj = 0; jj < 4; jj++) { int access_x = std::max(std::min(input_x - 1 + ii, in_w - 1), static_cast<int>(0)); int access_y = std::max(std::min(input_y - 1 + jj, in_h - 1), static_cast<int>(0)); if (data_layout == DataLayout::kNCHW) { T grad = output_grad_t(i, j, k, l); input_grad_t(i, j, access_y, access_x) += grad * y_coeffs[jj] * x_coeffs[ii]; } else { T grad = output_grad_t(i, k, l, j); input_grad_t(i, access_y, access_x, j) += grad * y_coeffs[jj] * x_coeffs[ii]; } } } } } } } } template <typename T> static void Interpolate1DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } } PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_w}; } else { dim_out = {n, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolation<T>(input, output, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } } PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_h, out_w}; } else { dim_out = {n, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolate<T>(input, output, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolation<T>(input, output, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUFwd(const framework::ExecutionContext& ctx, const Tensor& input, Tensor* output) { const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input.dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } else { float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } } PADDLE_ENFORCE_GT(out_d, 0, platform::errors::InvalidArgument( "out_d in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_h, 0, platform::errors::InvalidArgument( "out_h in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); PADDLE_ENFORCE_GT(out_w, 0, platform::errors::InvalidArgument( "out_w in Attr(out_shape) of Op(interpolate) " "should be greater than 0.")); framework::DDim dim_out; if (data_layout == DataLayout::kNCHW) { dim_out = {n, c, out_d, out_h, out_w}; } else { dim_out = {n, out_d, out_h, out_w, c}; } output->mutable_data<T>(dim_out, ctx.GetPlace()); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(input, ctx.GetPlace(), output); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolation<T>(input, output, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate1DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_w = out_size_data[0]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_w = new_size[0]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_w}; } else { dim_grad = {n, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_w = 0.f; if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("linear" == interp_method) { LinearInterpolationGrad<T>(output_grad, input_grad, ratio_w, in_w, n, c, out_w, align_corners, align_mode, data_layout); } } template <typename T> static void Interpolate2DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor& output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_h = out_size_data[0]; out_w = out_size_data[1]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_h = new_size[0]; out_w = new_size[1]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_h, in_w}; } else { dim_grad = {n, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_h = 0.f; float ratio_w = 0.f; if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("bilinear" == interp_method) { BilinearInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, align_mode, data_layout); } else if ("nearest" == interp_method) { NearestNeighborInterpolateGrad<T>(output_grad, input_grad, ratio_h, ratio_w, n, c, out_h, out_w, align_corners, data_layout); } else if ("bicubic" == interp_method) { BicubicInterpolationGrad<T>(output_grad, input_grad, ratio_h, ratio_w, in_h, in_w, n, c, out_h, out_w, align_corners, data_layout); } } template <typename T> static void Interpolate3DCPUBwd(const framework::ExecutionContext& ctx, Tensor* input_grad, const Tensor output_grad) { auto* input = ctx.Input<Tensor>("X"); const std::string data_layout_str = ctx.Attr<std::string>("data_layout"); const DataLayout data_layout = framework::StringToDataLayout(data_layout_str); int n, c, in_d, in_h, in_w; ExtractNCDWH(input->dims(), data_layout, &n, &c, &in_d, &in_h, &in_w); auto interp_method = ctx.Attr<std::string>("interp_method"); bool align_corners = ctx.Attr<bool>("align_corners"); int align_mode = ctx.Attr<int>("align_mode"); int out_d = ctx.Attr<int>("out_d"); int out_h = ctx.Attr<int>("out_h"); int out_w = ctx.Attr<int>("out_w"); float scale; auto scale_tensor = ctx.Input<Tensor>("Scale"); if (scale_tensor != nullptr) { auto scale_data = get_new_data_from_tensor<float>(scale_tensor); scale = scale_data[0]; } else { scale = ctx.Attr<float>("scale"); } if (scale > 0) { out_d = static_cast<int>(in_d * scale); out_h = static_cast<int>(in_h * scale); out_w = static_cast<int>(in_w * scale); } auto out_size = ctx.Input<Tensor>("OutSize"); if (out_size != nullptr) { auto out_size_data = get_new_data_from_tensor<int>(out_size); out_d = out_size_data[0]; out_h = out_size_data[1]; out_w = out_size_data[2]; } auto list_new_size_tensor = ctx.MultiInput<framework::Tensor>("SizeTensor"); if (list_new_size_tensor.size() > 0) { // have size tensor auto new_size = get_new_shape(list_new_size_tensor); out_d = new_size[0]; out_h = new_size[1]; out_w = new_size[2]; } framework::DDim dim_grad; if (data_layout == DataLayout::kNCHW) { dim_grad = {n, c, in_d, in_h, in_w}; } else { dim_grad = {n, in_d, in_h, in_w, c}; } input_grad->mutable_data<T>(dim_grad, ctx.GetPlace()); auto& device_ctx = ctx.template device_context<platform::CPUDeviceContext>(); math::SetConstant<platform::CPUDeviceContext, T> zero; zero(device_ctx, input_grad, static_cast<T>(0.0)); if (in_d == out_d && in_h == out_h && in_w == out_w) { framework::TensorCopy(output_grad, ctx.GetPlace(), input_grad); return; } float ratio_d = 0.f; float ratio_h = 0.f; float ratio_w = 0.f; if (out_d > 1) { ratio_d = (align_corners) ? static_cast<float>(in_d - 1) / (out_d - 1) : static_cast<float>(in_d) / out_d; } if (out_h > 1) { ratio_h = (align_corners) ? static_cast<float>(in_h - 1) / (out_h - 1) : static_cast<float>(in_h) / out_h; } if (out_w > 1) { ratio_w = (align_corners) ? 
static_cast<float>(in_w - 1) / (out_w - 1) : static_cast<float>(in_w) / out_w; } if ("trilinear" == interp_method) { TrilinearInterpolationGrad<T>( output_grad, input_grad, ratio_d, ratio_h, ratio_w, in_d, in_h, in_w, n, c, out_d, out_h, out_w, align_corners, align_mode, data_layout); } } template <typename T> class InterpolateKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); auto input_dims = input->dims(); if (input_dims.size() == 3) { // 1D interpolation Interpolate1DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 4) { // 2D interpolation Interpolate2DCPUFwd<T>(ctx, *input, output); } else if (input_dims.size() == 5) { // 3D interpolation Interpolate3DCPUFwd<T>(ctx, *input, output); } } }; template <typename T> class InterpolateGradKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); auto output_grad_dims = output_grad->dims(); if (output_grad_dims.size() == 3) { // 1D interpolation grad Interpolate1DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 4) { // 2D interpolation grad Interpolate2DCPUBwd<T>(ctx, input_grad, *output_grad); } else if (output_grad_dims.size() == 5) { // 3D interpolation grad Interpolate3DCPUBwd<T>(ctx, input_grad, *output_grad); } } }; } // namespace operators } // namespace paddle
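/* Hedged standalone sketch, not part of the Paddle sources above: every
 * kernel in this header derives its source coordinate from one of two scale
 * ratios. With align_corners the first and last samples of input and output
 * coincide, so ratio = (in - 1) / (out - 1); otherwise ratio = in / out and
 * half-pixel centers are compared. This toy program prints the nearest-
 * neighbor mapping produced by both conventions for one axis. */
#include <stdio.h>

int main(void) {
  int in_w = 4, out_w = 8;
  float ratio_corners = (float)(in_w - 1) / (out_w - 1); /* align_corners */
  float ratio_plain   = (float)in_w / out_w;             /* default */
  for (int l = 0; l < out_w; l++) {
    /* source index selection as in NearestNeighborInterpolate */
    int src_corners = (int)(ratio_corners * l + 0.5f); /* rounds */
    int src_plain   = (int)(ratio_plain * l);          /* truncates */
    printf("out %d -> in %d (align_corners) / %d (default)\n",
           l, src_corners, src_plain);
  }
  return 0;
}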
Transform.h
#ifndef DDM__ALGORITHM__TRANSFORM_H__ #define DDM__ALGORITHM__TRANSFORM_H__ #include "../../ddm/GlobRef.h" #include "../../ddm/GlobAsyncRef.h" #include "../../ddm/algorithm/LocalRange.h" #include "../../ddm/algorithm/Operation.h" #include "../../ddm/algorithm/Accumulate.h" #include "../../ddm/iterator/GlobIter.h" #include "../../ddm/internal/Config.h" #include "../../ddm/util/Trace.h" #include "../dart-impl/dart_communication.h" #include <iterator> #ifdef DDM_ENABLE_OPENMP #include <omp.h> #endif namespace ddm { namespace internal { /** * Wrapper of the blocking DART accumulate operation. */ template< typename ValueType > inline dart_ret_t transform_blocking_impl( dart_gptr_t dest, ValueType * values, size_t nvalues, dart_operation_t op) { static_assert(ddm::dart_datatype<ValueType>::value != DART_TYPE_UNDEFINED, "Cannot accumulate unknown type!"); dart_ret_t result = dart_accumulate( dest, reinterpret_cast<void *>(values), nvalues, ddm::dart_datatype<ValueType>::value, op); dart_flush(dest); return result; } /** * Wrapper of the non-blocking DART accumulate operation. */ template< typename ValueType > dart_ret_t transform_impl( dart_gptr_t dest, ValueType * values, size_t nvalues, dart_operation_t op) { static_assert(ddm::dart_datatype<ValueType>::value != DART_TYPE_UNDEFINED, "Cannot accumulate unknown type!"); dart_ret_t result = dart_accumulate( dest, reinterpret_cast<void *>(values), nvalues, ddm::dart_datatype<ValueType>::value, op); dart_flush_local(dest); return result; } } // namespace internal /** * Apply a given function to elements in a range and store the result in * another range, beginning at \c out_first. * Corresponding to \c MPI_Accumulate, the unary operation is executed * atomically on single elements. * * Precondition: All elements in the input range are contained in a single * block so that * * <tt> * g_out_last == g_out_first + (l_in_last - l_in_first) * </tt> * * Semantics: * * <tt> * unary_op(in_first[0]), unary_op(in_first[1]), ..., unary_op(in_first[n]) * </tt> * * \returns Output iterator to the element past the last element transformed. * * \ingroup DDMAlgorithms */ template< typename ValueType, class InputIt, class OutputIt, class UnaryOperation > OutputIt transform( InputIt in_first, InputIt in_last, OutputIt out_first, UnaryOperation unary_op); /** * Apply a given function to pairs of elements from two ranges and store the * result in another range, beginning at \c out_first. * * Corresponding to \c MPI_Accumulate, the binary operation is executed * atomically on single elements. * * Precondition: All elements in the input range are contained in a single * block so that * * g_out_last == g_out_first + (l_in_last - l_in_first) * * Semantics: * * binary_op(in_a[0], in_b[0]), * binary_op(in_a[1], in_b[1]), * ..., * binary_op(in_a[n], in_b[n]) * * Example: * \code * gptr_diff_t num_transformed_elements = * ddm::distance( * ddm::transform(in.begin(), in.end(), // A * out.begin(), // B * out.begin(), // C = op(A, B) * ddm::plus<int>()), // op * out.end()); * * \endcode * * \returns Output iterator to the element past the last element transformed. 
* \see ddm::accumulate * \see DDMReduceOperations * * \tparam InputIt Iterator on first (local) input range * \tparam GlobInputIt Iterator on second (global) input range * \tparam GlobOutputIt Iterator on global result range * \tparam BinaryOperation Reduce operation type * * \ingroup DDMAlgorithms */ template< typename ValueType, class InputIt, class GlobInputIt, class GlobOutputIt, class BinaryOperation > GlobOutputIt transform( /// Iterator on begin of first local range InputIt in_a_first, /// Iterator after last element of local range InputIt in_a_last, /// Iterator on begin of second local range GlobInputIt in_b_first, /// Iterator on first element of global output range GlobOutputIt out_first, /// Reduce operation BinaryOperation binary_op); /** * Transform operation on ranges with identical distribution and start * offset. * In this case, no communication is needed as all output values can be * obtained from input values in local memory: * * \note * This function does not execute the transformation as atomic operation * on elements. Use \c ddm::transform if concurrent access to elements is * possible. * * <pre> * input a: [ u0 | u1 | u2 | ... ] * op op op ... * input b: [ u0 | u1 | u2 | ... ] * = = = ... * output: [ u0 | u1 | u2 | ... ] * </pre> */ template< typename ValueType, class InputAIt, class InputBIt, class OutputIt, class BinaryOperation > OutputIt transform_local( InputAIt in_a_first, InputAIt in_a_last, InputBIt in_b_first, OutputIt out_first, BinaryOperation binary_op) { DDM_LOG_DEBUG("ddm::transform_local()"); DDM_ASSERT_MSG(in_a_first.pattern() == in_b_first.pattern(), "ddm::transform_local: " "distributions of input ranges differ"); DDM_ASSERT_MSG(in_a_first.pattern() == out_first.pattern(), "ddm::transform_local: " "distributions of input- and output ranges differ"); // Local subrange of input range a: auto local_range_a = ddm::local_range(in_a_first, in_a_last); ValueType * lbegin_a = local_range_a.begin; ValueType * lend_a = local_range_a.end; if (lbegin_a == lend_a) { // Local input range is empty, return initial output iterator to indicate // that no values have been transformed: DDM_LOG_DEBUG("ddm::transform_local", "local range empty"); return out_first; } // Global offset of first local element: auto g_offset_first = in_a_first.pattern().global(0); // Number of elements in global ranges: auto num_gvalues = ddm::distance(in_a_first, in_b_first); DDM_LOG_TRACE_VAR("ddm::transform_local", num_gvalues); // Number of local elements: DDM_LOG_TRACE("ddm::transform_local", "local elements:", lend_a-lbegin_a); // Local subrange of input range b: ValueType * lbegin_b = (in_b_first + g_offset_first).local(); // Local pointer of initial output element: ValueType * lbegin_out = (out_first + g_offset_first).local(); // Generate output values: #ifdef DDM_ENABLE_OPENMP ddm::util::UnitLocality uloc; auto n_threads = uloc.num_domain_threads(); DDM_LOG_DEBUG("ddm::transform_local", "thread capacity:", n_threads); if (n_threads > 1) { auto l_size = lend_a - lbegin_a; // TODO: Vectorize. 
// Documentation of Intel MIC intrinsics, see: // https://software.intel.com/de-de/node/523533 // https://software.intel.com/de-de/node/523387 #pragma omp parallel for num_threads(n_threads) schedule(static) for (int i = 0; i < l_size; i++) { lbegin_out[i] = binary_op(lbegin_a[i], lbegin_b[i]); } return out_first + num_gvalues; } #endif // No OpenMP or insufficient number of threads for parallelization: for (; lbegin_a != lend_a; ++lbegin_a, ++lbegin_b, ++lbegin_out) { *lbegin_out = binary_op(*lbegin_a, *lbegin_b); } // Return out_end iterator past final transformed element; return out_first + num_gvalues; } /** * Local lhs input ranges on global output range. * */ template< typename ValueType, class InputIt, class GlobInputIt, class GlobOutputIt, class BinaryOperation > GlobOutputIt transform( InputIt in_a_first, InputIt in_a_last, GlobInputIt in_b_first, GlobOutputIt out_first, BinaryOperation binary_op) { DDM_LOG_DEBUG("ddm::transform(af, al, bf, outf, binop)"); // Outut range different from rhs input range is not supported yet auto in_first = in_a_first; auto in_last = in_a_last; std::vector<ValueType> in_range; if (in_b_first == out_first) { // Output range is rhs input range: C += A // Input is (in_a_first, in_a_last). } else { // Output range different from rhs input range: C = A+B // Input is (in_a_first, in_a_last) + (in_b_first, in_b_last): std::transform( in_a_first, in_a_last, in_b_first, std::back_inserter(in_range), binary_op); in_first = in_range.data(); in_last = in_first + in_range.size(); } ddm::util::Trace trace("transform"); // Resolve local range from global range: // Number of elements in local range: size_t num_local_elements = std::distance(in_first, in_last); // Global iterator to dart_gptr_t: dart_gptr_t dest_gptr = out_first.dart_gptr(); // Send accumulate message: trace.enter_state("transform_blocking"); ddm::internal::transform_blocking_impl( dest_gptr, in_first, num_local_elements, binary_op.dart_operation()); trace.exit_state("transform_blocking"); // The position past the last element transformed in global element space // cannot be resolved from the size of the local range if the local range // spans over more than one block. Otherwise, the difference of two global // iterators is not well-defined. The corresponding invariant is: // g_out_last == g_out_first + (l_in_last - l_in_first) // Example: // unit: 0 1 0 // local offset: | 0 1 2 | 0 1 2 | 3 4 5 | ... // global offset: | 0 1 2 3 4 5 6 7 8 ... // range: [- - - - -] // When iterating in local memory range [0,5[ of unit 0, the position of the // global iterator to return is 8 != 5 // For ranges over block borders, we would have to resolve the global // position past the last element transformed from the iterator's pattern // (see ddm::PatternIterator). return out_first + num_local_elements; } /** * Specialization of \c ddm::transform for global lhs input range. */ template< typename ValueType, class PatternType, class GlobInputIt, class GlobOutputIt, class BinaryOperation > GlobOutputIt transform( GlobIter<ValueType, PatternType> in_a_first, GlobIter<ValueType, PatternType> in_a_last, GlobInputIt in_b_first, GlobOutputIt out_first, BinaryOperation binary_op = ddm::plus<ValueType>()) { DDM_LOG_DEBUG("ddm::transform(gaf, gal, gbf, goutf, binop)"); auto in_first = in_a_first; auto in_last = in_a_last; if (in_b_first == out_first) { // Output range is rhs input range: C += A // Input is (in_a_first, in_a_last). 
} else { DDM_THROW( ddm::exception::NotImplemented, "ddm::transform is only implemented for out = op(in,out)"); // Output range different from rhs input range: C = A+B // Input is (in_a_first, in_a_last) + (in_b_first, in_b_last): // TODO: // in_range.allocate(...); // in_first = in_range.begin(); // in_last = in_range.end(); } ddm::util::Trace trace("transform"); // Pattern of input ranges a and b, and output range: auto pattern_in_a = in_a_first.pattern(); auto pattern_in_b = in_b_first.pattern(); auto pattern_out = out_first.pattern(); #if __NON_ATOMIC__ // Fast path: check if transform operation is local-only: if (pattern_in_a == pattern_in_b && pattern_in_a == pattern_out) { // Identical pattern in all ranges if (in_a_first.pos() == in_b_first.pos() && in_a_first.pos() == out_first.pos()) { trace.enter_state("local"); // All units operate on local ranges that have identical distribution: auto out_last = ddm::transform_local<ValueType>( in_a_first, in_a_last, in_b_first, out_first, binary_op); trace.exit_state("local"); return out_last; } } #endif // Resolve teams from global iterators: ddm::Team & team_in_a = pattern_in_a.team(); DDM_ASSERT_MSG( team_in_a == pattern_in_b.team(), "ddm::transform: Different teams in input ranges"); DDM_ASSERT_MSG( team_in_a == pattern_out.team(), "ddm::transform: Different teams in input- and output ranges"); // Resolve local range from global range: auto l_index_range_in_a = local_index_range(in_a_first, in_a_last); DDM_LOG_TRACE_VAR("ddm::transform", l_index_range_in_a.begin); DDM_LOG_TRACE_VAR("ddm::transform", l_index_range_in_a.end); // Local range to global offset: auto global_offset = pattern_in_a.global( l_index_range_in_a.begin); DDM_LOG_TRACE_VAR("ddm::transform", global_offset); // Number of elements in local range: size_t num_local_elements = l_index_range_in_a.end - l_index_range_in_a.begin; DDM_LOG_TRACE_VAR("ddm::transform", num_local_elements); // Global iterator to dart_gptr_t: dart_gptr_t dest_gptr = (out_first + global_offset).dart_gptr(); // Native pointer to local sub-range: ValueType * l_values = (in_a_first + global_offset).local(); // Send accumulate message: trace.enter_state("transform_blocking"); ddm::internal::transform_blocking_impl( dest_gptr, l_values, num_local_elements, binary_op.dart_operation()); trace.exit_state("transform_blocking"); return out_first + global_offset + num_local_elements; } /** * Specialization of \c ddm::transform as non-blocking operation. * * \tparam InputIt Iterator on first (typically local) input range * \tparam GlobInputIt Iterator on second (typically global) input range * \tparam GlobOutputIt Iterator on global result range * \tparam BinaryOperation Reduce operation type */ template< typename ValueType, class InputIt, class GlobInputIt, class BinaryOperation > GlobAsyncRef<ValueType> transform( InputIt in_a_first, InputIt in_a_last, GlobInputIt in_b_first, GlobAsyncRef<ValueType> out_first, BinaryOperation binary_op = ddm::plus<ValueType>()) { DDM_THROW( ddm::exception::NotImplemented, "Async variant of ddm::transform is not implemented"); } } // namespace ddm #endif // DDM__ALGORITHM__TRANSFORM_H__
GB_unop__cosh_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__cosh_fc32_fc32) // op(A') function: GB (_unop_tran__cosh_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = ccoshf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ccoshf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = ccoshf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COSH || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__cosh_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = ccoshf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = ccoshf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__cosh_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
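/* Hedged usage sketch, not part of the generated kernel above: user code
 * does not call GB(_unop_apply__cosh_fc32_fc32) directly. It is dispatched
 * from GrB_Matrix_apply when the built-in GxB_COSH_FC32 unary operator meets
 * a matrix of type GxB_FC32, so no typecast is needed. Matrix construction
 * and error handling are elided here. */
GrB_Info apply_cosh_sketch (GrB_Matrix C, GrB_Matrix A)
{
    /* C = cosh (A), elementwise, on single-precision complex matrices */
    return (GrB_Matrix_apply (C, NULL, NULL, GxB_COSH_FC32, A, NULL)) ;
}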
mc_omp.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>

#define THREAD_NUM 16
#define D 5

double f(double x1, double x2, double x3, double x4, double x5) {
    return exp(-x1*x1 - x2*x2 - x3*x3 - x4*x4 - x5*x5);
}

/* Note: rand() is not required to be reentrant, so concurrent calls
 * race on its hidden state; a reentrant variant using rand_r() is
 * sketched after this file. */
double uran(double a, double b){
    return rand() / (RAND_MAX + 1.0) * (b - a) + a;
}

int main(int argc, char **argv) {
    srand(time(0));
    double a = 0.0;
    double b = 1.0;
    double X[D];
    double integral = 0.0;
    double vol = 1.0;
    assert (argc == 2);
    int n = atoi(argv[1]);
    clock_t ts = clock();
    struct timeval start, end;
    gettimeofday(&start, NULL);
    omp_set_num_threads(THREAD_NUM);
    /* X holds one sample point at a time and must be thread-private:
     * sharing a single X[] across all threads is a data race. */
    #pragma omp parallel for private(X) shared(integral)
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < D; j++) {
            X[j] = uran(a, b);
        }
        #pragma omp atomic
        integral += f(X[0], X[1], X[2], X[3], X[4]) / n;
    }
    for (int j = 0; j < D; j++) {
        vol *= b - a;
    }
    integral *= vol;
    gettimeofday(&end, NULL);
    double elapsed = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
    ts = clock() - ts;
    printf("%ld clocks (%lf seconds)\n", ts, elapsed);
    printf("integral is: %lf\n", integral);
    return 0;
}
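A contention-free variant of the sampling loop above, sketched under the assumption that POSIX rand_r() is available: a sum reduction replaces the per-iteration atomic, and each thread owns its PRNG state instead of racing on rand()'s hidden one.

#include <omp.h>
#include <stdlib.h>
#include <math.h>

/* Returns the mean of f over n uniform samples in [a,b)^5; the caller
 * multiplies by the volume factor (b-a)^5. */
double mc_integrate_sketch(int n, double a, double b) {
  double sum = 0.0;
  #pragma omp parallel reduction(+:sum)
  {
    /* Per-thread seed: no shared PRNG state, no race. */
    unsigned int seed = 1234u + (unsigned int) omp_get_thread_num();
    #pragma omp for
    for (int i = 0; i < n; i++) {
      double x[5];
      for (int j = 0; j < 5; j++)
        x[j] = rand_r(&seed) / (RAND_MAX + 1.0) * (b - a) + a;
      sum += exp(-x[0]*x[0] - x[1]*x[1] - x[2]*x[2] - x[3]*x[3] - x[4]*x[4]);
    }
  }
  return sum / n;
}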
simd-clones-5.c
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */ /* { dg-options "-fopenmp -w" } */ /* ?? The -w above is to inhibit the following warning for now: a.c:2:6: warning: AVX vector argument without AVX enabled changes the ABI. */ #pragma omp declare simd notinbranch simdlen(4) void foo (int *a) { *a = 555; }
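The declare simd directive above only makes the compiler emit vector clones of foo; the clones pay off when the call site sits in a loop the compiler vectorizes. A small sketch of such a caller, where the vectorizer may then invoke the simdlen(4) clone, one lane per iteration:

void foo (int *a);

void
call_foo (int *restrict v, int n)
{
  /* Inside an OpenMP simd loop, calls to a `declare simd' function are
     candidates for replacement by its vector clone.  */
  #pragma omp simd
  for (int i = 0; i < n; i++)
    foo (&v[i]);
}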
GB_unop__identity_int8_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int8_fp32) // op(A') function: GB (_unop_tran__identity_int8_fp32) // C type: int8_t // A type: float // cast: int8_t cij = GB_cast_to_int8_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int8_t z = GB_cast_to_int8_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int8_fp32) ( int8_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; int8_t z = GB_cast_to_int8_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; int8_t z = GB_cast_to_int8_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int8_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
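The only nontrivial step in this kernel is GB_cast_to_int8_t. A stand-in sketch of its assumed semantics, namely NaN mapped to 0 and the result saturated to the int8_t range; this helper is an illustration, not the library routine:

#include <math.h>
#include <stdint.h>

static inline int8_t cast_to_int8_sketch (double x)
{
    if (isnan (x)) return (0) ;                     // NaN casts to zero
    if (x <= (double) INT8_MIN) return (INT8_MIN) ; // saturate low
    if (x >= (double) INT8_MAX) return (INT8_MAX) ; // saturate high
    return ((int8_t) x) ;                           // in range: truncate
}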
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Default the sizes to 0 so they are never read uninitialized when
   * arguments are missing; with all sizes 0 the kernel guard below
   * simply skips the computation. */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 16;
  tile_size[3] = 256;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2

/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-2,4),ceild(8*t2-Nz-3,16));t3<=min(floord(4*Nt+Ny-9,16),floord(4*t1+Ny-1,16));t3++) { for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(16*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(16*t3+Nx+3,256));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),4*t3+2),64*t4+62);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 
2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
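The tiled CLooG nest above is hard to read directly. As a cross-check, here is an untiled reference for one time step of the same axis-symmetric 25-point update, derived from the coefficient/offset pattern in the expression: coef[3r-2], coef[3r-1] and coef[3r] multiply the distance-r neighbor pairs along z, y and x for r = 1..4, with coef[0] on the center point.

/* One time step of the 25-point variable-coefficient stencil,
 * written without tiling so the transformed loop nest can be
 * validated against it (same 4-deep ghost layer on every face). */
void stencil_step_ref(double ****A, double ****coef, int t,
                      int Nz, int Ny, int Nx) {
  for (int i = 4; i < Nz - 4; i++)
    for (int j = 4; j < Ny - 4; j++)
      for (int k = 4; k < Nx - 4; k++) {
        double s = coef[0][i][j][k] * A[t % 2][i][j][k];
        for (int r = 1; r <= 4; r++) {
          s += coef[3*r - 2][i][j][k] * (A[t % 2][i - r][j][k] + A[t % 2][i + r][j][k]);
          s += coef[3*r - 1][i][j][k] * (A[t % 2][i][j - r][k] + A[t % 2][i][j + r][k]);
          s += coef[3*r    ][i][j][k] * (A[t % 2][i][j][k - r] + A[t % 2][i][j][k + r]);
        }
        A[(t + 1) % 2][i][j][k] = s;
      }
}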
rootfinder_mex.c
#include "mex.h" #include "matrix.h" #include "linequad.h" /* The gateway function */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { // Read input int n = mxGetNumberOfElements(prhs[0]); double* xhat = mxGetPr(prhs[0]); double* yhat = mxGetPr(prhs[1]); double* zhat = mxGetPr(prhs[2]); int N = mxGetNumberOfElements(prhs[3]); double* x0 = mxGetPr(prhs[3]); double* y0 = mxGetPr(prhs[4]); double* z0 = mxGetPr(prhs[5]); double* tinit_re_p = mxGetPr(prhs[6]); double* tinit_im_p = mxGetPi(prhs[6]); // Prepare output plhs[0] = mxCreateDoubleMatrix(N, 1, mxCOMPLEX); double* troot_re_p = mxGetPr(plhs[0]); double* troot_im_p = mxGetPi(plhs[0]); plhs[1] = mxCreateLogicalMatrix(N, 1); mxLogical* converged_p = mxGetLogicals(plhs[1]); // Call function for all inputs #pragma omp parallel for schedule(guided) for (int i=0; i<N; i++) { double complex troot = 0; int converged = 0; double complex tinit = tinit_re_p[i] + I*tinit_im_p[i]; converged = rootfinder(xhat, yhat, zhat, n, x0[i], y0[i], z0[i], tinit, &troot); troot_re_p[i] = creal(troot); troot_im_p[i] = cimag(troot); converged_p[i] = converged; } }
GB_binop__gt_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_08__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_02__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_04__gt_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int64) // A*D function (colscale): GB (_AxD__gt_int64) // D*A function (rowscale): GB (_DxB__gt_int64) // C+=B function (dense accum): GB (_Cdense_accumB__gt_int64) // C+=b function (dense accum): GB (_Cdense_accumb__gt_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int64) // C=scalar+B GB (_bind1st__gt_int64) // C=scalar+B' GB (_bind1st_tran__gt_int64) // C=A+scalar GB (_bind2nd__gt_int64) // C=A'+scalar GB (_bind2nd_tran__gt_int64) // C type: bool // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_INT64 || GxB_NO_GT_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; 
beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__gt_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__gt_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // 
GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
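The bind1st/bind2nd pair matters for a non-commutative operator like GT: binding the scalar to the first operand computes x > bij, while binding it to the second computes aij > y, which is why both kernels are generated. Stripped to the dense case, the two loops above amount to this sketch:

#include <stdbool.h>
#include <stdint.h>

/* bind1st: Cx [p] = (x > Bx [p]) */
void bind1st_gt_sketch (bool *Cx, int64_t x, const int64_t *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (x > Bx [p]) ;
}

/* bind2nd: Cx [p] = (Ax [p] > y) -- a different result for GT */
void bind2nd_gt_sketch (bool *Cx, const int64_t *Ax, int64_t y, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx [p] = (Ax [p] > y) ;
}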
tree-pretty-print.c
/* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "tree.h" #include "output.h" #include "tree-pretty-print.h" #include "hashtab.h" #include "tree-flow.h" #include "langhooks.h" #include "tree-iterator.h" #include "tree-chrec.h" #include "tree-pass.h" #include "value-prof.h" #include "predict.h" /* Local functions, macros and variables. */ static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_struct_decl (pretty_printer *, const_tree, int, int); static void do_niy (pretty_printer *, const_tree); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (buffer); } while (0) #define NIY do_niy(buffer,node) static pretty_printer buffer; static int initialized = 0; /* Try to print something for an unknown tree code. */ static void do_niy (pretty_printer *buffer, const_tree node) { int i, len; pp_string (buffer, "<<< Unknown tree: "); pp_string (buffer, tree_code_name[(int) TREE_CODE (node)]); if (EXPR_P (node)) { len = TREE_OPERAND_LENGTH (node); for (i = 0; i < len; ++i) { newline_and_indent (buffer, 2); dump_generic_node (buffer, TREE_OPERAND (node, i), 2, 0, false); } } pp_string (buffer, " >>>"); } /* Debugging function to print out a generic expression. */ DEBUG_FUNCTION void debug_generic_expr (tree t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a generic statement. */ DEBUG_FUNCTION void debug_generic_stmt (tree t) { print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS); fprintf (stderr, "\n"); } /* Debugging function to print out a chain of trees . */ DEBUG_FUNCTION void debug_tree_chain (tree t) { struct pointer_set_t *seen = pointer_set_create (); while (t) { print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, " "); t = TREE_CHAIN (t); if (pointer_set_insert (seen, t)) { fprintf (stderr, "... [cycled back to "); print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID); fprintf (stderr, "]"); break; } } fprintf (stderr, "\n"); pointer_set_destroy (seen); } /* Prints declaration DECL to the FILE with details specified by FLAGS. */ void print_generic_decl (FILE *file, tree decl, int flags) { maybe_init_pretty_print (file); print_declaration (&buffer, decl, 2, flags); pp_write_text_to_stream (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. 
*/ void print_generic_stmt (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, true); pp_flush (&buffer); } /* Print tree T, and its successors, on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. The output is indented by INDENT spaces. */ void print_generic_stmt_indented (FILE *file, tree t, int flags, int indent) { int i; maybe_init_pretty_print (file); for (i = 0; i < indent; i++) pp_space (&buffer); dump_generic_node (&buffer, t, indent, flags, true); pp_flush (&buffer); } /* Print a single expression T on file FILE. FLAGS specifies details to show in the dump. See TDF_* in tree-pass.h. */ void print_generic_expr (FILE *file, tree t, int flags) { maybe_init_pretty_print (file); dump_generic_node (&buffer, t, 0, flags, false); } /* Dump the name of a _DECL node and its DECL_UID if TDF_UID is set in FLAGS. */ static void dump_decl_name (pretty_printer *buffer, tree node, int flags) { if (DECL_NAME (node)) { if ((flags & TDF_ASMNAME) && DECL_ASSEMBLER_NAME_SET_P (node)) pp_tree_identifier (buffer, DECL_ASSEMBLER_NAME (node)); else pp_tree_identifier (buffer, DECL_NAME (node)); } if ((flags & TDF_UID) || DECL_NAME (node) == NULL_TREE) { if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1) pp_printf (buffer, "L.%d", (int) LABEL_DECL_UID (node)); else if (TREE_CODE (node) == DEBUG_EXPR_DECL) { if (flags & TDF_NOUID) pp_string (buffer, "D#xxxx"); else pp_printf (buffer, "D#%i", DEBUG_TEMP_UID (node)); } else { char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D'; if (flags & TDF_NOUID) pp_printf (buffer, "%c.xxxx", c); else pp_printf (buffer, "%c.%u", c, DECL_UID (node)); } } if ((flags & TDF_ALIAS) && DECL_PT_UID (node) != DECL_UID (node)) { if (flags & TDF_NOUID) pp_printf (buffer, "ptD.xxxx"); else pp_printf (buffer, "ptD.%u", DECL_PT_UID (node)); } } /* Like the above, but used for pretty printing function calls. */ static void dump_function_name (pretty_printer *buffer, tree node, int flags) { if (TREE_CODE (node) == NOP_EXPR) node = TREE_OPERAND (node, 0); if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0) pp_string (buffer, lang_hooks.decl_printable_name (node, 1)); else dump_decl_name (buffer, node, flags); } /* Dump a function declaration. NODE is the FUNCTION_TYPE. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_function_declaration (pretty_printer *buffer, tree node, int spc, int flags) { bool wrote_arg = false; tree arg; pp_space (buffer); pp_character (buffer, '('); /* Print the argument types. */ arg = TYPE_ARG_TYPES (node); while (arg && arg != void_list_node && arg != error_mark_node) { if (wrote_arg) { pp_character (buffer, ','); pp_space (buffer); } wrote_arg = true; dump_generic_node (buffer, TREE_VALUE (arg), spc, flags, false); arg = TREE_CHAIN (arg); } /* Drop the trailing void_type_node if we had any previous argument. */ if (arg == void_list_node && !wrote_arg) pp_string (buffer, "void"); /* Properly dump vararg function types. */ else if (!arg && wrote_arg) pp_string (buffer, ", ..."); /* Avoid printing any arg for unprototyped functions. */ pp_character (buffer, ')'); } /* Dump the domain associated with an array. 
*/ static void dump_array_domain (pretty_printer *buffer, tree domain, int spc, int flags) { pp_character (buffer, '['); if (domain) { tree min = TYPE_MIN_VALUE (domain); tree max = TYPE_MAX_VALUE (domain); if (min && max && integer_zerop (min) && host_integerp (max, 0)) pp_wide_integer (buffer, TREE_INT_CST_LOW (max) + 1); else { if (min) dump_generic_node (buffer, min, spc, flags, false); pp_character (buffer, ':'); if (max) dump_generic_node (buffer, max, spc, flags, false); } } else pp_string (buffer, "<unknown>"); pp_character (buffer, ']'); } /* Dump OpenMP clause CLAUSE. BUFFER, CLAUSE, SPC and FLAGS are as in dump_generic_node. */ static void dump_omp_clause (pretty_printer *buffer, tree clause, int spc, int flags) { const char *name; switch (OMP_CLAUSE_CODE (clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; print_remap: pp_string (buffer, name); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_REDUCTION: pp_string (buffer, "reduction("); pp_string (buffer, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause))); pp_character (buffer, ':'); dump_generic_node (buffer, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_IF: pp_string (buffer, "if("); dump_generic_node (buffer, OMP_CLAUSE_IF_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NUM_THREADS: pp_string (buffer, "num_threads("); dump_generic_node (buffer, OMP_CLAUSE_NUM_THREADS_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_NOWAIT: pp_string (buffer, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string (buffer, "ordered"); break; case OMP_CLAUSE_DEFAULT: pp_string (buffer, "default("); switch (OMP_CLAUSE_DEFAULT_KIND (clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string (buffer, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string (buffer, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string (buffer, "private"); break; case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE: pp_string (buffer, "firstprivate"); break; default: gcc_unreachable (); } pp_character (buffer, ')'); break; case OMP_CLAUSE_SCHEDULE: pp_string (buffer, "schedule("); switch (OMP_CLAUSE_SCHEDULE_KIND (clause)) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string (buffer, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string (buffer, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string (buffer, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string (buffer, "runtime"); break; case OMP_CLAUSE_SCHEDULE_AUTO: pp_string (buffer, "auto"); break; default: gcc_unreachable (); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause)) { pp_character (buffer, ','); dump_generic_node (buffer, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_character (buffer, ')'); break; case OMP_CLAUSE_UNTIED: pp_string (buffer, "untied"); break; case OMP_CLAUSE_COLLAPSE: pp_string (buffer, "collapse("); dump_generic_node (buffer, OMP_CLAUSE_COLLAPSE_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_FINAL: pp_string (buffer, 
"final("); dump_generic_node (buffer, OMP_CLAUSE_FINAL_EXPR (clause), spc, flags, false); pp_character (buffer, ')'); break; case OMP_CLAUSE_MERGEABLE: pp_string (buffer, "mergeable"); break; default: /* Should never happen. */ dump_generic_node (buffer, clause, spc, flags, false); break; } } /* Dump the list of OpenMP clauses. BUFFER, SPC and FLAGS are as in dump_generic_node. */ void dump_omp_clauses (pretty_printer *buffer, tree clause, int spc, int flags) { if (clause == NULL) return; pp_space (buffer); while (1) { dump_omp_clause (buffer, clause, spc, flags); clause = OMP_CLAUSE_CHAIN (clause); if (clause == NULL) return; pp_space (buffer); } } /* Dump location LOC to BUFFER. */ static void dump_location (pretty_printer *buffer, location_t loc) { expanded_location xloc = expand_location (loc); pp_character (buffer, '['); if (xloc.file) { pp_string (buffer, xloc.file); pp_string (buffer, " : "); } pp_decimal_int (buffer, xloc.line); pp_string (buffer, "] "); } /* Dump lexical block BLOCK. BUFFER, SPC and FLAGS are as in dump_generic_node. */ static void dump_block_node (pretty_printer *buffer, tree block, int spc, int flags) { tree t; pp_printf (buffer, "BLOCK #%d ", BLOCK_NUMBER (block)); if (flags & TDF_ADDRESS) pp_printf (buffer, "[%p] ", (void *) block); if (BLOCK_ABSTRACT (block)) pp_string (buffer, "[abstract] "); if (TREE_ASM_WRITTEN (block)) pp_string (buffer, "[written] "); if (flags & TDF_SLIM) return; if (BLOCK_SOURCE_LOCATION (block)) dump_location (buffer, BLOCK_SOURCE_LOCATION (block)); newline_and_indent (buffer, spc + 2); if (BLOCK_SUPERCONTEXT (block)) { pp_string (buffer, "SUPERCONTEXT: "); dump_generic_node (buffer, BLOCK_SUPERCONTEXT (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_SUBBLOCKS (block)) { pp_string (buffer, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_CHAIN (block)) { pp_string (buffer, "SIBLINGS: "); for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_VARS (block)) { pp_string (buffer, "VARS: "); for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (VEC_length (tree, BLOCK_NONLOCALIZED_VARS (block)) > 0) { unsigned i; VEC(tree,gc) *nlv = BLOCK_NONLOCALIZED_VARS (block); pp_string (buffer, "NONLOCALIZED_VARS: "); FOR_EACH_VEC_ELT (tree, nlv, i, t) { dump_generic_node (buffer, t, 0, flags, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (block)) { pp_string (buffer, "ABSTRACT_ORIGIN: "); dump_generic_node (buffer, BLOCK_ABSTRACT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_FRAGMENT_ORIGIN (block)) { pp_string (buffer, "FRAGMENT_ORIGIN: "); dump_generic_node (buffer, BLOCK_FRAGMENT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (buffer, spc + 2); } if (BLOCK_FRAGMENT_CHAIN (block)) { pp_string (buffer, "FRAGMENT_CHAIN: "); for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t)) { dump_generic_node (buffer, t, 0, flags | TDF_SLIM, false); pp_string (buffer, " "); } newline_and_indent (buffer, spc + 2); } } /* Dump the node NODE on the pretty_printer BUFFER, SPC 
spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in tree-pass.h). If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *buffer, tree node, int spc, int flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (buffer, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) dump_location (buffer, EXPR_LOCATION (node)); switch (TREE_CODE (node)) { case ERROR_MARK: pp_string (buffer, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (buffer, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (buffer, TREE_PURPOSE (node), spc, flags, false); pp_space (buffer); } dump_generic_node (buffer, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_character (buffer, ','); pp_space (buffer); } } break; case TREE_BINFO: dump_generic_node (buffer, BINFO_TYPE (node), spc, flags, false); break; case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (buffer, TREE_VEC_ELT (node, i), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); } dump_generic_node (buffer, TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class tclass; if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, "restrict "); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, "<address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, "> "); } tclass = TREE_CODE_CLASS (TREE_CODE (node)); if (tclass == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else pp_string (buffer, "<unnamed type decl>"); } else if (tclass == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (buffer, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else pp_string (buffer, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (buffer, "vector"); pp_character (buffer, '('); pp_wide_integer (buffer, TYPE_VECTOR_SUBPARTS (node)); pp_string (buffer, ") "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { pp_string (buffer, (TYPE_UNSIGNED (node) ? "<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == COMPLEX_TYPE) { pp_string (buffer, "__complex__ "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == REAL_TYPE) { pp_string (buffer, "<float:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == FIXED_POINT_TYPE) { pp_string (buffer, "<fixed-point-"); pp_string (buffer, TYPE_SATURATING (node) ? 
"sat:" : "nonsat:"); pp_decimal_int (buffer, TYPE_PRECISION (node)); pp_string (buffer, ">"); } else if (TREE_CODE (node) == VOID_TYPE) pp_string (buffer, "void"); else pp_string (buffer, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? "*" : "&"); if (TREE_TYPE (node) == NULL) { pp_string (buffer, str); pp_string (buffer, "<null type>"); } else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (buffer, TREE_TYPE (fnode), spc, flags, false); pp_space (buffer); pp_character (buffer, '('); pp_string (buffer, str); if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else if (flags & TDF_NOUID) pp_printf (buffer, "<Txxxx>"); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); pp_character (buffer, ')'); dump_function_declaration (buffer, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); pp_string (buffer, str); if (quals & TYPE_QUAL_CONST) pp_string (buffer, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (buffer, " restrict"); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (buffer, " <address-space-"); pp_decimal_int (buffer, TYPE_ADDR_SPACE (node)); pp_string (buffer, ">"); } if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (buffer, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case MEM_REF: { if (integer_zerop (TREE_OPERAND (node, 1)) /* Dump the types of INTEGER_CSTs explicitly, for we can't infer them and MEM_ATTR caching will share MEM_REFs with differently-typed op0s. */ && TREE_CODE (TREE_OPERAND (node, 0)) != INTEGER_CST /* Released SSA_NAMES have no TREE_TYPE. */ && TREE_TYPE (TREE_OPERAND (node, 0)) != NULL_TREE /* Same pointer types, but ignoring POINTER_TYPE vs. REFERENCE_TYPE. */ && (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 0))) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1)))) && (TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 0))) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 1)))) && (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 0))) == TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 1)))) /* Same value types ignoring qualifiers. 
*/ && (TYPE_MAIN_VARIANT (TREE_TYPE (node)) == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1)))))) { if (TREE_CODE (TREE_OPERAND (node, 0)) != ADDR_EXPR) { pp_string (buffer, "*"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); } else dump_generic_node (buffer, TREE_OPERAND (TREE_OPERAND (node, 0), 0), spc, flags, false); } else { tree ptype; pp_string (buffer, "MEM["); pp_string (buffer, "("); ptype = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (node, 1))); dump_generic_node (buffer, ptype, spc, flags | TDF_SLIM, false); pp_string (buffer, ")"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); if (!integer_zerop (TREE_OPERAND (node, 1))) { pp_string (buffer, " + "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); } pp_string (buffer, "]"); } break; } case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (buffer, "MEM["); if (TREE_CODE (TMR_BASE (node)) == ADDR_EXPR) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "symbol: "); dump_generic_node (buffer, TREE_OPERAND (TMR_BASE (node), 0), spc, flags, false); } else { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, TMR_BASE (node), spc, flags, false); } tmp = TMR_INDEX2 (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "base: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "index: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_STEP (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "step: "); dump_generic_node (buffer, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (buffer, sep); sep = ", "; pp_string (buffer, "offset: "); dump_generic_node (buffer, tmp, spc, flags, false); } pp_string (buffer, "]"); } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (buffer, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_CONST) pp_string (buffer, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (buffer, "volatile "); /* Print the name of the structure. */ if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (buffer, "union "); if (TYPE_NAME (node)) dump_generic_node (buffer, TYPE_NAME (node), spc, flags, false); else if (!(flags & TDF_SLIM)) /* FIXME: If we eliminate the 'else' above and attempt to show the fields for named types, we may get stuck following a cycle of pointers to structs. The alleged self-reference check in print_struct_decl will not detect cycles involving more than one pointer or struct type. */ print_struct_decl (buffer, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. 
In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); pp_string (buffer, "B"); /* pseudo-unit */ } else if (host_integerp (node, 0)) pp_wide_integer (buffer, TREE_INT_CST_LOW (node)); else if (host_integerp (node, 1)) pp_unsigned_wide_integer (buffer, TREE_INT_CST_LOW (node)); else { tree val = node; unsigned HOST_WIDE_INT low = TREE_INT_CST_LOW (val); HOST_WIDE_INT high = TREE_INT_CST_HIGH (val); if (tree_int_cst_sgn (val) < 0) { pp_character (buffer, '-'); high = ~high + !low; low = -low; } /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems? */ sprintf (pp_buffer (buffer)->digit_buffer, HOST_WIDE_INT_PRINT_DOUBLE_HEX, (unsigned HOST_WIDE_INT) high, low); pp_string (buffer, pp_buffer (buffer)->digit_buffer); } break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (buffer, " overflow"); #if !defined(REAL_IS_NOT_DOUBLE) || defined(REAL_ARITHMETIC) d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (buffer, REAL_VALUE_NEGATIVE (d) ? " -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (buffer, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (buffer, string); } #else { HOST_WIDE_INT i; unsigned char *p = (unsigned char *) &TREE_REAL_CST (node); pp_string (buffer, "0x"); for (i = 0; i < sizeof TREE_REAL_CST (node); i++) output_formatted_integer (buffer, "%02x", *p++); } #endif break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (buffer, string); break; } case COMPLEX_CST: pp_string (buffer, "__complex__ ("); dump_generic_node (buffer, TREE_REALPART (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_IMAGPART (node), spc, flags, false); pp_string (buffer, ")"); break; case STRING_CST: pp_string (buffer, "\""); pretty_print_string (buffer, TREE_STRING_POINTER (node)); pp_string (buffer, "\""); break; case VECTOR_CST: { tree elt; pp_string (buffer, "{ "); for (elt = TREE_VECTOR_CST_ELTS (node); elt; elt = TREE_CHAIN (elt)) { dump_generic_node (buffer, TREE_VALUE (elt), spc, flags, false); if (TREE_CHAIN (elt)) pp_string (buffer, ", "); } pp_string (buffer, " }"); } break; case FUNCTION_TYPE: case METHOD_TYPE: dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_space (buffer); if (TREE_CODE (node) == METHOD_TYPE) { if (TYPE_METHOD_BASETYPE (node)) dump_decl_name (buffer, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), flags); else pp_string (buffer, "<null method basetype>"); pp_string (buffer, "::"); } if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (buffer, TYPE_NAME (node), flags); else if (flags & TDF_NOUID) pp_printf (buffer, "<Txxxx>"); else pp_printf (buffer, "<T%x>", TYPE_UID (node)); dump_function_declaration (buffer, node, spc, flags); break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (buffer, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (LABEL_DECL_UID (node) != -1) pp_printf (buffer, "<L%d>", (int) 
LABEL_DECL_UID (node)); else { if (flags & TDF_NOUID) pp_string (buffer, "<D.xxxx>"); else pp_printf (buffer, "<D.%u>", DECL_UID (node)); } break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (buffer, node, flags); else if (TYPE_NAME (TREE_TYPE (node)) != node) { if ((TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) && TYPE_METHODS (TREE_TYPE (node))) { /* The type is a c++ class: all structures have at least 4 methods. */ pp_string (buffer, "class "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } else { pp_string (buffer, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); } } else pp_string (buffer, "<anon>"); break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case DEBUG_EXPR_DECL: case NAMESPACE_DECL: dump_decl_name (buffer, node, flags); break; case RESULT_DECL: pp_string (buffer, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (op0 && (TREE_CODE (op0) == INDIRECT_REF || (TREE_CODE (op0) == MEM_REF && TREE_CODE (TREE_OPERAND (op0, 0)) != ADDR_EXPR && integer_zerop (TREE_OPERAND (op0, 1)) /* Dump the types of INTEGER_CSTs explicitly, for we can't infer them and MEM_ATTR caching will share MEM_REFs with differently-typed op0s. */ && TREE_CODE (TREE_OPERAND (op0, 0)) != INTEGER_CST /* Released SSA_NAMES have no TREE_TYPE. */ && TREE_TYPE (TREE_OPERAND (op0, 0)) != NULL_TREE /* Same pointer types, but ignoring POINTER_TYPE vs. REFERENCE_TYPE. */ && (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 0))) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1)))) && (TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0))) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 1)))) && (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 0))) == TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 1)))) /* Same value types ignoring qualifiers. 
*/ && (TYPE_MAIN_VARIANT (TREE_TYPE (op0)) == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1)))))))) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_string (buffer, str); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (buffer, "{off: "); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, '}'); } break; case BIT_FIELD_REF: pp_string (buffer, "BIT_FIELD_REF <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, ">"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); pp_character (buffer, '['); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (buffer, " ..."); pp_character (buffer, ']'); op0 = array_ref_low_bound (node); op1 = array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (buffer, "{lb: "); dump_generic_node (buffer, op0, spc, flags, false); pp_string (buffer, " sz: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, '}'); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = false; bool is_array_init = false; double_int curidx = double_int_zero; pp_character (buffer, '{'); if (TREE_CLOBBER_P (node)) pp_string (buffer, "CLOBBER"); else if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = true; else if (TREE_CODE (TREE_TYPE (node)) == ARRAY_TYPE && TYPE_DOMAIN (TREE_TYPE (node)) && TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))) && TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)))) == INTEGER_CST) { tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))); is_array_init = true; curidx = tree_to_double_int (minv); } FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field) { if (is_struct_init) { pp_character (buffer, '.'); dump_generic_node (buffer, field, spc, flags, false); pp_character (buffer, '='); } else if (is_array_init && (TREE_CODE (field) != INTEGER_CST || !double_int_equal_p (tree_to_double_int (field), curidx))) { pp_character (buffer, '['); if (TREE_CODE (field) == RANGE_EXPR) { dump_generic_node (buffer, TREE_OPERAND (field, 0), spc, flags, false); pp_string (buffer, " ... 
"); dump_generic_node (buffer, TREE_OPERAND (field, 1), spc, flags, false); if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST) curidx = tree_to_double_int (TREE_OPERAND (field, 1)); } else dump_generic_node (buffer, field, spc, flags, false); if (TREE_CODE (field) == INTEGER_CST) curidx = tree_to_double_int (field); pp_string (buffer, "]="); } } if (is_array_init) curidx = double_int_add (curidx, double_int_one); if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (buffer, val, flags); else dump_generic_node (buffer, val, spc, flags, false); if (ix != VEC_length (constructor_elt, CONSTRUCTOR_ELTS (node)) - 1) { pp_character (buffer, ','); pp_space (buffer); } } pp_character (buffer, '}'); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (buffer, "<COMPOUND_EXPR>"); break; } dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } for (tp = &TREE_OPERAND (node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (buffer, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (buffer, spc); else { pp_character (buffer, ','); pp_space (buffer); } } dump_generic_node (buffer, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (buffer, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (buffer, spc); else first = false; dump_generic_node (buffer, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); if (TREE_CODE (node) == MODIFY_EXPR && MOVE_NONTEMPORAL (node)) pp_string (buffer, "{nt}"); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (buffer, "TARGET_EXPR <"); dump_generic_node (buffer, TARGET_EXPR_SLOT (node), spc, flags, false); pp_character (buffer, ','); pp_space (buffer); dump_generic_node (buffer, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_character (buffer, '>'); break; case DECL_EXPR: print_declaration (buffer, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (buffer, "if ("); dump_generic_node (buffer, COND_EXPR_COND (node), spc, flags, false); pp_character (buffer, ')'); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (buffer); dump_generic_node (buffer, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (buffer, " else "); dump_generic_node (buffer, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. 
*/ if (COND_EXPR_THEN (node)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } /* Output COND_EXPR_ELSE. */ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (buffer, spc); pp_string (buffer, "else"); newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } } is_expr = false; } else { dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '?'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_space (buffer); pp_character (buffer, ':'); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_character (buffer, '{'); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (buffer); for (op0 = BIND_EXPR_VARS (node); op0; op0 = DECL_CHAIN (op0)) { print_declaration (buffer, op0, spc+2, flags); pp_newline (buffer); } } newline_and_indent (buffer, spc+2); dump_generic_node (buffer, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case CALL_EXPR: print_call_name (buffer, CALL_EXPR_FN (node), flags); /* Print parameters. */ pp_space (buffer); pp_character (buffer, '('); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (buffer, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_character (buffer, ','); pp_space (buffer); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_character (buffer, ','); pp_space (buffer); } pp_string (buffer, "__builtin_va_arg_pack ()"); } pp_character (buffer, ')'); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (buffer, " [static-chain: "); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ']'); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (buffer, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (buffer, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (buffer, "<<cleanup_point "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">>"); break; case PLACEHOLDER_EXPR: pp_string (buffer, "<PLACEHOLDER_EXPR "); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_character (buffer, '>'); break; /* Binary arithmetic and logic expressions. 
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case WIDEN_LSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op0, spc, flags, false); pp_space (buffer); pp_string (buffer, op); pp_space (buffer); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, op1, spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
*/ else pp_string (buffer, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_character (buffer, '('); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); } else dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, op_symbol (node)); break; case MIN_EXPR: pp_string (buffer, "MIN_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case MAX_EXPR: pp_string (buffer, "MAX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_character (buffer, '>'); break; case ABS_EXPR: pp_string (buffer, "ABS_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case RANGE_EXPR: NIY; break; case ADDR_SPACE_CONVERT_EXPR: case FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_character (buffer, '('); dump_generic_node (buffer, type, spc, flags, false); pp_string (buffer, ") "); } if (op_prio (op0) < op_prio (node)) pp_character (buffer, '('); dump_generic_node (buffer, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_character (buffer, ')'); break; case VIEW_CONVERT_EXPR: pp_string (buffer, "VIEW_CONVERT_EXPR<"); dump_generic_node (buffer, TREE_TYPE (node), spc, flags, false); pp_string (buffer, ">("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, ')'); break; case PAREN_EXPR: pp_string (buffer, "(("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, "))"); break; case NON_LVALUE_EXPR: pp_string (buffer, "NON_LVALUE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case SAVE_EXPR: pp_string (buffer, "SAVE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_character (buffer, '>'); break; case COMPLEX_EXPR: pp_string (buffer, "COMPLEX_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case CONJ_EXPR: pp_string (buffer, "CONJ_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case REALPART_EXPR: pp_string (buffer, "REALPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case IMAGPART_EXPR: pp_string (buffer, "IMAGPART_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case VA_ARG_EXPR: pp_string (buffer, "VA_ARG_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ">"); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (buffer, "try"); 
newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); newline_and_indent (buffer, spc); pp_string (buffer, (TREE_CODE (node) == TRY_CATCH_EXPR) ? "catch" : "finally"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case CATCH_EXPR: pp_string (buffer, "catch ("); dump_generic_node (buffer, CATCH_TYPES (node), spc+2, flags, false); pp_string (buffer, ")"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case EH_FILTER_EXPR: pp_string (buffer, "<<<eh_filter ("); dump_generic_node (buffer, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (buffer, ")>>>"); newline_and_indent (buffer, spc+2); pp_string (buffer, "{"); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_string (buffer, "}"); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. */ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (buffer, op0, spc, flags, false); pp_character (buffer, ':'); if (DECL_NONLOCAL (op0)) pp_string (buffer, " [non-local]"); break; case LOOP_EXPR: pp_string (buffer, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); newline_and_indent (buffer, spc+4); dump_generic_node (buffer, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case PREDICT_EXPR: pp_string (buffer, "// predicted "); if (PREDICT_EXPR_OUTCOME (node)) pp_string (buffer, "likely by "); else pp_string (buffer, "unlikely by "); pp_string (buffer, predictor_name (PREDICT_EXPR_PREDICTOR (node))); pp_string (buffer, " predictor."); break; case RETURN_EXPR: pp_string (buffer, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (buffer); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (buffer, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (buffer, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (buffer, "if ("); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ") break"); break; case SWITCH_EXPR: pp_string (buffer, "switch ("); dump_generic_node (buffer, SWITCH_COND (node), spc, flags, false); pp_character (buffer, ')'); if (!(flags & TDF_SLIM)) { newline_and_indent (buffer, spc+2); pp_character (buffer, '{'); if (SWITCH_BODY (node)) { newline_and_indent (buffer, spc+4); dump_generic_node (buffer, SWITCH_BODY (node), spc+4, flags, true); } else { tree vec = SWITCH_LABELS (node); size_t i, n = TREE_VEC_LENGTH (vec); for (i = 0; i < n; ++i) { tree elt = TREE_VEC_ELT (vec, i); newline_and_indent (buffer, spc+4); if (elt) { dump_generic_node (buffer, elt, spc+4, flags, false); pp_string (buffer, " goto "); dump_generic_node (buffer, CASE_LABEL (elt), spc+4, 
flags, true); pp_semicolon (buffer); } else pp_string (buffer, "case ???: goto ???;"); } } newline_and_indent (buffer, spc+2); pp_character (buffer, '}'); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (buffer, name); break; } } pp_string (buffer, "goto "); dump_generic_node (buffer, op0, spc, flags, false); break; case ASM_EXPR: pp_string (buffer, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (buffer, " __volatile__"); pp_character (buffer, '('); dump_generic_node (buffer, ASM_STRING (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_OUTPUTS (node), spc, flags, false); pp_character (buffer, ':'); dump_generic_node (buffer, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_character (buffer, ':'); dump_generic_node (buffer, ASM_CLOBBERS (node), spc, flags, false); } pp_string (buffer, ")"); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); pp_string (buffer, " ... "); dump_generic_node (buffer, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (buffer, "case "); dump_generic_node (buffer, CASE_LOW (node), spc, flags, false); } else pp_string (buffer, "default"); pp_character (buffer, ':'); break; case OBJ_TYPE_REF: pp_string (buffer, "OBJ_TYPE_REF("); dump_generic_node (buffer, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_character (buffer, ';'); dump_generic_node (buffer, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_character (buffer, '-'); pp_character (buffer, '>'); dump_generic_node (buffer, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_character (buffer, ')'); break; case SSA_NAME: dump_generic_node (buffer, SSA_NAME_VAR (node), spc, flags, false); pp_string (buffer, "_"); pp_decimal_int (buffer, SSA_NAME_VERSION (node)); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (buffer, "(ab)"); else if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (buffer, "(D)"); break; case WITH_SIZE_EXPR: pp_string (buffer, "WITH_SIZE_EXPR <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ">"); break; case ASSERT_EXPR: pp_string (buffer, "ASSERT_EXPR <"); dump_generic_node (buffer, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, ASSERT_EXPR_COND (node), spc, flags, false); pp_string (buffer, ">"); break; case SCEV_KNOWN: pp_string (buffer, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (buffer, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_string (buffer, "{"); dump_generic_node (buffer, CHREC_LEFT (node), spc, flags, false); pp_string (buffer, ", +, "); dump_generic_node (buffer, CHREC_RIGHT (node), spc, flags, false); pp_string (buffer, "}_"); dump_generic_node (buffer, CHREC_VAR (node), spc, flags, false); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (buffer, "REALIGN_LOAD <"); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string 
(buffer, ">"); break; case VEC_COND_EXPR: pp_string (buffer, " VEC_COND_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PERM_EXPR: pp_string (buffer, " VEC_PERM_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " , "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case DOT_PROD_EXPR: pp_string (buffer, " DOT_PROD_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case WIDEN_MULT_PLUS_EXPR: pp_string (buffer, " WIDEN_MULT_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case WIDEN_MULT_MINUS_EXPR: pp_string (buffer, " WIDEN_MULT_MINUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case FMA_EXPR: pp_string (buffer, " FMA_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 2), spc, flags, false); pp_string (buffer, " > "); break; case OMP_PARALLEL: pp_string (buffer, "#pragma omp parallel"); dump_omp_clauses (buffer, OMP_PARALLEL_CLAUSES (node), spc, flags); dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } is_expr = false; break; case OMP_TASK: pp_string (buffer, "#pragma omp task"); dump_omp_clauses (buffer, OMP_TASK_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_FOR: pp_string (buffer, "#pragma omp for"); dump_omp_clauses (buffer, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { int i; if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); spc += 4; newline_and_indent (buffer, spc); dump_generic_node (buffer, OMP_FOR_PRE_BODY (node), spc, flags, false); } spc -= 2; for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++) { spc += 2; newline_and_indent (buffer, spc); pp_string (buffer, "for ("); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_INIT (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node (buffer, TREE_VEC_ELT (OMP_FOR_COND (node), i), spc, flags, false); pp_string (buffer, "; "); dump_generic_node 
(buffer, TREE_VEC_ELT (OMP_FOR_INCR (node), i), spc, flags, false); pp_string (buffer, ")"); } if (OMP_FOR_BODY (node)) { newline_and_indent (buffer, spc + 2); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 4); dump_generic_node (buffer, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2; if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (buffer, spc + 2); pp_character (buffer, '}'); } } is_expr = false; break; case OMP_SECTIONS: pp_string (buffer, "#pragma omp sections"); dump_omp_clauses (buffer, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTION: pp_string (buffer, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (buffer, "#pragma omp master"); goto dump_omp_body; case OMP_ORDERED: pp_string (buffer, "#pragma omp ordered"); goto dump_omp_body; case OMP_CRITICAL: pp_string (buffer, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (buffer); pp_character (buffer, '('); dump_generic_node (buffer, OMP_CRITICAL_NAME (node), spc, flags, false); pp_character (buffer, ')'); } goto dump_omp_body; case OMP_ATOMIC: pp_string (buffer, "#pragma omp atomic"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_ATOMIC_READ: pp_string (buffer, "#pragma omp atomic read"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); break; case OMP_ATOMIC_CAPTURE_OLD: case OMP_ATOMIC_CAPTURE_NEW: pp_string (buffer, "#pragma omp atomic capture"); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_SINGLE: pp_string (buffer, "#pragma omp single"); dump_omp_clauses (buffer, OMP_SINGLE_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_CLAUSE: dump_omp_clause (buffer, node, spc, flags); is_expr = false; break; case TRANSACTION_EXPR: if (TRANSACTION_EXPR_OUTER (node)) pp_string (buffer, "__transaction_atomic [[outer]]"); else if (TRANSACTION_EXPR_RELAXED (node)) pp_string (buffer, "__transaction_relaxed"); else pp_string (buffer, "__transaction_atomic"); if (!(flags & TDF_SLIM) && TRANSACTION_EXPR_BODY (node)) { newline_and_indent (buffer, spc); pp_character (buffer, '{'); newline_and_indent (buffer, spc + 2); dump_generic_node (buffer, TRANSACTION_EXPR_BODY (node), spc + 2, flags, false); newline_and_indent (buffer, spc); pp_character (buffer, '}'); } is_expr = false; break; case REDUC_MAX_EXPR: pp_string (buffer, " REDUC_MAX_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_MIN_EXPR: pp_string (buffer, " REDUC_MIN_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case REDUC_PLUS_EXPR: pp_string (buffer, " REDUC_PLUS_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); 
pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_MULT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_MULT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_LSHIFT_HI_EXPR: pp_string (buffer, " VEC_WIDEN_LSHIFT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_WIDEN_LSHIFT_LO_EXPR: pp_string (buffer, " VEC_WIDEN_LSHIFT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (buffer, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_LO_EXPR: pp_string (buffer, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (buffer, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (buffer, " VEC_PACK_SAT_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (buffer, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (buffer, TREE_OPERAND (node, 0), spc, flags, false); pp_string (buffer, ", "); dump_generic_node (buffer, TREE_OPERAND (node, 1), spc, flags, false); pp_string (buffer, " > "); break; case BLOCK: dump_block_node (buffer, node, spc, flags); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (buffer); /* If we're building a diagnostic, the formatted text will be written into BUFFER's stream by the caller; otherwise, write it now. */ if (!(flags & TDF_DIAGNOSTIC)) pp_write_text_to_stream (buffer); return spc; } /* Print the declaration of a variable. */ void print_declaration (pretty_printer *buffer, tree t, int spc, int flags) { INDENT (spc); if (TREE_CODE (t) == TYPE_DECL) pp_string (buffer, "typedef "); if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL) && DECL_REGISTER (t)) pp_string (buffer, "register "); if (TREE_PUBLIC (t) && DECL_EXTERNAL (t)) pp_string (buffer, "extern "); else if (TREE_STATIC (t)) pp_string (buffer, "static "); /* Print the type and name. */ if (TREE_TYPE (t) && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { tree tmp; /* Print array's type.
*/ tmp = TREE_TYPE (t); while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE) tmp = TREE_TYPE (tmp); dump_generic_node (buffer, TREE_TYPE (tmp), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); /* Print the dimensions. */ tmp = TREE_TYPE (t); while (TREE_CODE (tmp) == ARRAY_TYPE) { dump_array_domain (buffer, TYPE_DOMAIN (tmp), spc, flags); tmp = TREE_TYPE (tmp); } } else if (TREE_CODE (t) == FUNCTION_DECL) { dump_generic_node (buffer, TREE_TYPE (TREE_TYPE (t)), spc, flags, false); pp_space (buffer); dump_decl_name (buffer, t, flags); dump_function_declaration (buffer, TREE_TYPE (t), spc, flags); } else { /* Print type declaration. */ dump_generic_node (buffer, TREE_TYPE (t), spc, flags, false); /* Print variable's name. */ pp_space (buffer); dump_generic_node (buffer, t, spc, flags, false); } if (TREE_CODE (t) == VAR_DECL && DECL_HARD_REGISTER (t)) { pp_string (buffer, " __asm__ "); pp_character (buffer, '('); dump_generic_node (buffer, DECL_ASSEMBLER_NAME (t), spc, flags, false); pp_character (buffer, ')'); } /* The initial value of a function serves to determine whether the function is declared or defined. So the following does not apply to function nodes. */ if (TREE_CODE (t) != FUNCTION_DECL) { /* Print the initial value. */ if (DECL_INITIAL (t)) { pp_space (buffer); pp_character (buffer, '='); pp_space (buffer); dump_generic_node (buffer, DECL_INITIAL (t), spc, flags, false); } } if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t)) { pp_string (buffer, " [value-expr: "); dump_generic_node (buffer, DECL_VALUE_EXPR (t), spc, flags, false); pp_character (buffer, ']'); } pp_character (buffer, ';'); } /* Prints a structure: name, fields, and methods. FIXME: Still incomplete. */ static void print_struct_decl (pretty_printer *buffer, const_tree node, int spc, int flags) { /* Print the name of the structure. */ if (TYPE_NAME (node)) { INDENT (spc); if (TREE_CODE (node) == RECORD_TYPE) pp_string (buffer, "struct "); else if ((TREE_CODE (node) == UNION_TYPE || TREE_CODE (node) == QUAL_UNION_TYPE)) pp_string (buffer, "union "); dump_generic_node (buffer, TYPE_NAME (node), spc, 0, false); } /* Print the contents of the structure. */ pp_newline (buffer); INDENT (spc); pp_character (buffer, '{'); pp_newline (buffer); /* Print the fields of the structure. */ { tree tmp; tmp = TYPE_FIELDS (node); while (tmp) { /* Avoid printing the structure recursively. */ /* FIXME: Not implemented correctly; what about the case when we have a cycle in the containment graph? Maybe this could be solved by looking at the scope in which the structure was declared. */ if (TREE_TYPE (tmp) != node && (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE || TREE_TYPE (TREE_TYPE (tmp)) != node)) { print_declaration (buffer, tmp, spc+2, flags); pp_newline (buffer); } tmp = DECL_CHAIN (tmp); } } INDENT (spc); pp_character (buffer, '}'); } /* Return the priority of the operator CODE. From lowest to highest precedence with either left-to-right (L-R) or right-to-left (R-L) associativity: 1 [L-R] , 2 [R-L] = += -= *= /= %= &= ^= |= <<= >>= 3 [R-L] ?: 4 [L-R] || 5 [L-R] && 6 [L-R] | 7 [L-R] ^ 8 [L-R] & 9 [L-R] == != 10 [L-R] < <= > >= 11 [L-R] << >> 12 [L-R] + - 13 [L-R] * / % 14 [R-L] ! ~ ++ -- + - * & (type) sizeof 15 [L-R] fn() [] -> . unary +, - and * have higher precedence than the corresponding binary operators.
*/ int op_code_prio (enum tree_code code) { switch (code) { case TREE_LIST: case COMPOUND_EXPR: case BIND_EXPR: return 1; case MODIFY_EXPR: case INIT_EXPR: return 2; case COND_EXPR: return 3; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return 4; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return 5; case BIT_IOR_EXPR: return 6; case BIT_XOR_EXPR: case TRUTH_XOR_EXPR: return 7; case BIT_AND_EXPR: return 8; case EQ_EXPR: case NE_EXPR: return 9; case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: return 10; case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case VEC_WIDEN_LSHIFT_HI_EXPR: case VEC_WIDEN_LSHIFT_LO_EXPR: case WIDEN_LSHIFT_EXPR: return 11; case WIDEN_SUM_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case MINUS_EXPR: return 12; case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case WIDEN_MULT_EXPR: case DOT_PROD_EXPR: case WIDEN_MULT_PLUS_EXPR: case WIDEN_MULT_MINUS_EXPR: case MULT_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case FMA_EXPR: return 13; case TRUTH_NOT_EXPR: case BIT_NOT_EXPR: case POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case NEGATE_EXPR: case INDIRECT_REF: case ADDR_EXPR: case FLOAT_EXPR: CASE_CONVERT: case FIX_TRUNC_EXPR: case TARGET_EXPR: return 14; case CALL_EXPR: case ARRAY_REF: case ARRAY_RANGE_REF: case COMPONENT_REF: return 15; /* Special expressions. */ case MIN_EXPR: case MAX_EXPR: case ABS_EXPR: case REALPART_EXPR: case IMAGPART_EXPR: case REDUC_MAX_EXPR: case REDUC_MIN_EXPR: case REDUC_PLUS_EXPR: case VEC_LSHIFT_EXPR: case VEC_RSHIFT_EXPR: case VEC_UNPACK_HI_EXPR: case VEC_UNPACK_LO_EXPR: case VEC_UNPACK_FLOAT_HI_EXPR: case VEC_UNPACK_FLOAT_LO_EXPR: case VEC_PACK_TRUNC_EXPR: case VEC_PACK_SAT_EXPR: return 16; default: /* Return an arbitrarily high precedence to avoid surrounding single VAR_DECLs in ()s. */ return 9999; } } /* Return the priority of the operator OP. */ int op_prio (const_tree op) { enum tree_code code; if (op == NULL) return 9999; code = TREE_CODE (op); if (code == SAVE_EXPR || code == NON_LVALUE_EXPR) return op_prio (TREE_OPERAND (op, 0)); return op_code_prio (code); } /* Return the symbol associated with operator CODE. 
*/ const char * op_symbol_code (enum tree_code code) { switch (code) { case MODIFY_EXPR: return "="; case TRUTH_OR_EXPR: case TRUTH_ORIF_EXPR: return "||"; case TRUTH_AND_EXPR: case TRUTH_ANDIF_EXPR: return "&&"; case BIT_IOR_EXPR: return "|"; case TRUTH_XOR_EXPR: case BIT_XOR_EXPR: return "^"; case ADDR_EXPR: case BIT_AND_EXPR: return "&"; case ORDERED_EXPR: return "ord"; case UNORDERED_EXPR: return "unord"; case EQ_EXPR: return "=="; case UNEQ_EXPR: return "u=="; case NE_EXPR: return "!="; case LT_EXPR: return "<"; case UNLT_EXPR: return "u<"; case LE_EXPR: return "<="; case UNLE_EXPR: return "u<="; case GT_EXPR: return ">"; case UNGT_EXPR: return "u>"; case GE_EXPR: return ">="; case UNGE_EXPR: return "u>="; case LTGT_EXPR: return "<>"; case LSHIFT_EXPR: return "<<"; case RSHIFT_EXPR: return ">>"; case LROTATE_EXPR: return "r<<"; case RROTATE_EXPR: return "r>>"; case VEC_LSHIFT_EXPR: return "v<<"; case VEC_RSHIFT_EXPR: return "v>>"; case WIDEN_LSHIFT_EXPR: return "w<<"; case POINTER_PLUS_EXPR: return "+"; case PLUS_EXPR: return "+"; case REDUC_PLUS_EXPR: return "r+"; case WIDEN_SUM_EXPR: return "w+"; case WIDEN_MULT_EXPR: return "w*"; case NEGATE_EXPR: case MINUS_EXPR: return "-"; case BIT_NOT_EXPR: return "~"; case TRUTH_NOT_EXPR: return "!"; case MULT_EXPR: case INDIRECT_REF: return "*"; case TRUNC_DIV_EXPR: case RDIV_EXPR: return "/"; case CEIL_DIV_EXPR: return "/[cl]"; case FLOOR_DIV_EXPR: return "/[fl]"; case ROUND_DIV_EXPR: return "/[rd]"; case EXACT_DIV_EXPR: return "/[ex]"; case TRUNC_MOD_EXPR: return "%"; case CEIL_MOD_EXPR: return "%[cl]"; case FLOOR_MOD_EXPR: return "%[fl]"; case ROUND_MOD_EXPR: return "%[rd]"; case PREDECREMENT_EXPR: return " --"; case PREINCREMENT_EXPR: return " ++"; case POSTDECREMENT_EXPR: return "-- "; case POSTINCREMENT_EXPR: return "++ "; case MAX_EXPR: return "max"; case MIN_EXPR: return "min"; default: return "<<< ??? >>>"; } } /* Return the symbol associated with operator OP. */ static const char * op_symbol (const_tree op) { return op_symbol_code (TREE_CODE (op)); } /* Prints the name of a call. NODE is the CALL_EXPR_FN of a CALL_EXPR or the gimple_call_fn of a GIMPLE_CALL. */ void print_call_name (pretty_printer *buffer, tree node, int flags) { tree op0 = node; if (TREE_CODE (op0) == NON_LVALUE_EXPR) op0 = TREE_OPERAND (op0, 0); again: switch (TREE_CODE (op0)) { case VAR_DECL: case PARM_DECL: case FUNCTION_DECL: dump_function_name (buffer, op0, flags); break; case ADDR_EXPR: case INDIRECT_REF: case NOP_EXPR: op0 = TREE_OPERAND (op0, 0); goto again; case COND_EXPR: pp_string (buffer, "("); dump_generic_node (buffer, TREE_OPERAND (op0, 0), 0, flags, false); pp_string (buffer, ") ? "); dump_generic_node (buffer, TREE_OPERAND (op0, 1), 0, flags, false); pp_string (buffer, " : "); dump_generic_node (buffer, TREE_OPERAND (op0, 2), 0, flags, false); break; case ARRAY_REF: if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL) dump_function_name (buffer, TREE_OPERAND (op0, 0), flags); else dump_generic_node (buffer, op0, 0, flags, false); break; case MEM_REF: if (integer_zerop (TREE_OPERAND (op0, 1))) { op0 = TREE_OPERAND (op0, 0); goto again; } /* Fallthru. */ case COMPONENT_REF: case SSA_NAME: case OBJ_TYPE_REF: dump_generic_node (buffer, op0, 0, flags, false); break; default: NIY; } } /* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ... 
*/ static void pretty_print_string (pretty_printer *buffer, const char *str) { if (str == NULL) return; while (*str) { switch (str[0]) { case '\b': pp_string (buffer, "\\b"); break; case '\f': pp_string (buffer, "\\f"); break; case '\n': pp_string (buffer, "\\n"); break; case '\r': pp_string (buffer, "\\r"); break; case '\t': pp_string (buffer, "\\t"); break; case '\v': pp_string (buffer, "\\v"); break; case '\\': pp_string (buffer, "\\\\"); break; case '\"': pp_string (buffer, "\\\""); break; case '\'': pp_string (buffer, "\\'"); break; /* No need to handle \0; the loop terminates on \0. */ case '\1': pp_string (buffer, "\\1"); break; case '\2': pp_string (buffer, "\\2"); break; case '\3': pp_string (buffer, "\\3"); break; case '\4': pp_string (buffer, "\\4"); break; case '\5': pp_string (buffer, "\\5"); break; case '\6': pp_string (buffer, "\\6"); break; case '\7': pp_string (buffer, "\\7"); break; default: pp_character (buffer, str[0]); break; } str++; } } static void maybe_init_pretty_print (FILE *file) { if (!initialized) { pp_construct (&buffer, /* prefix */NULL, /* line-width */0); pp_needs_newline (&buffer) = true; pp_translate_identifiers (&buffer) = false; initialized = 1; } buffer.buffer->stream = file; } static void newline_and_indent (pretty_printer *buffer, int spc) { pp_newline (buffer); INDENT (spc); } /* Handle a %K format for TEXT. Separate from default_tree_printer so it can also be used in front ends. %K: a statement, from which EXPR_LOCATION and TREE_BLOCK will be recorded. */ void percent_K_format (text_info *text) { tree t = va_arg (*text->args_ptr, tree), block; gcc_assert (text->locus != NULL); *text->locus = EXPR_LOCATION (t); gcc_assert (pp_ti_abstract_origin (text) != NULL); block = TREE_BLOCK (t); *pp_ti_abstract_origin (text) = NULL; while (block && TREE_CODE (block) == BLOCK && BLOCK_ABSTRACT_ORIGIN (block)) { tree ao = BLOCK_ABSTRACT_ORIGIN (block); while (TREE_CODE (ao) == BLOCK && BLOCK_ABSTRACT_ORIGIN (ao) && BLOCK_ABSTRACT_ORIGIN (ao) != ao) ao = BLOCK_ABSTRACT_ORIGIN (ao); if (TREE_CODE (ao) == FUNCTION_DECL) { *pp_ti_abstract_origin (text) = block; break; } block = BLOCK_SUPERCONTEXT (block); } } /* Print the identifier ID to PRETTY-PRINTER. */ void pp_base_tree_identifier (pretty_printer *pp, tree id) { if (pp_translate_identifiers (pp)) { const char *text = identifier_to_locale (IDENTIFIER_POINTER (id)); pp_append_text (pp, text, text + strlen (text)); } else pp_append_text (pp, IDENTIFIER_POINTER (id), IDENTIFIER_POINTER (id) + IDENTIFIER_LENGTH (id)); } /* A helper function that is used to dump function information before the function dump. */ void dump_function_header (FILE *dump_file, tree fdecl, int flags) { const char *dname, *aname; struct cgraph_node *node = cgraph_get_node (fdecl); struct function *fun = DECL_STRUCT_FUNCTION (fdecl); dname = lang_hooks.decl_printable_name (fdecl, 2); if (DECL_ASSEMBLER_NAME_SET_P (fdecl)) aname = (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (fdecl))); else aname = "<unset-asm-name>"; fprintf (dump_file, "\n;; Function %s (%s, funcdef_no=%d", dname, aname, fun->funcdef_no); if (!(flags & TDF_NOUID)) fprintf (dump_file, ", decl_uid=%d", DECL_UID (fdecl)); if (node) { fprintf (dump_file, ", cgraph_uid=%d)%s\n\n", node->uid, node->frequency == NODE_FREQUENCY_HOT ? " (hot)" : node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED ? " (unlikely executed)" : node->frequency == NODE_FREQUENCY_EXECUTED_ONCE ? " (executed once)" : ""); } else fprintf (dump_file, ")\n\n"); }
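/* Editor's note -- illustrative sketch, not part of tree-pretty-print.c: the
   parenthesization driven by op_prio () above can be reproduced in isolation.
   mini_expr, mini_prio and mini_dump are hypothetical stand-ins for the tree
   nodes, op_prio and dump_generic_node; only the comparison rule (wrap an
   operand whose priority is <= the parent's, keeping the semantics of the
   tree representation) is taken from the binary-operator case above.  */
#include <stdio.h>

struct mini_expr { char op; struct mini_expr *l, *r; char leaf; };

static int
mini_prio (const struct mini_expr *e)
{
  if (e->op == 0)
    return 9999;                  /* Leaves never get parenthesized.  */
  return e->op == '*' ? 13 : 12;  /* Priorities as in op_code_prio.  */
}

static void
mini_dump (const struct mini_expr *e)
{
  if (e->op == 0)
    {
      putchar (e->leaf);
      return;
    }
  /* Wrap an operand with lower-or-equal priority in parentheses.  */
  if (mini_prio (e->l) <= mini_prio (e))
    {
      putchar ('(');
      mini_dump (e->l);
      putchar (')');
    }
  else
    mini_dump (e->l);
  printf (" %c ", e->op);
  if (mini_prio (e->r) <= mini_prio (e))
    {
      putchar ('(');
      mini_dump (e->r);
      putchar (')');
    }
  else
    mini_dump (e->r);
}

int
main (void)
{
  struct mini_expr a = { 0, 0, 0, 'a' }, b = { 0, 0, 0, 'b' }, c = { 0, 0, 0, 'c' };
  struct mini_expr sum = { '+', &a, &b, 0 };
  struct mini_expr prod = { '*', &sum, &c, 0 };
  mini_dump (&prod);              /* Prints "(a + b) * c".  */
  putchar ('\n');
  return 0;
}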
task_codegen.c
// RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER typedef void *omp_depend_t; typedef __UINTPTR_TYPE__ omp_event_handle_t; void foo(void); // CHECK-LABEL: @main int main(void) { omp_depend_t d, x; omp_event_handle_t evt; int a, *b; // CHECK: [[D_ADDR:%.+]] = alloca i8*, // CHECK: [[X_ADDR:%.+]] = alloca i8*, // CHECK: [[EVT_ADDR:%.+]] = alloca i64, // CHECK: [[A_ADDR:%.+]] = alloca i32, // CHECK: [[DEPOBJ_SIZE_ADDR:%.+]] = alloca i64, // CHECK: [[DEPOBJ_SIZE_ADDR1:%.+]] = alloca i64, // CHECK: = alloca i64, // CHECK: [[DEP_COUNTER_ADDR:%.+]] = alloca i64, // CHECK: [[GTID:%.+]] = call i32 @__kmpc_global_thread_num( // CHECK: [[ALLOC:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 [[GTID]], i32 65, i64 48, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, [[PRIVATES_TY:%.+]]*)* [[TASK_ENTRY:@.+]] to i32 (i32, i8*)*)) // CHECK: [[EVT_VAL:%.+]] = call i8* @__kmpc_task_allow_completion_event(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[ALLOC]]) // CHECK: [[CAST_EVT_VAL:%.+]] = ptrtoint i8* [[EVT_VAL]] to i64 // CHECK: store i64 [[CAST_EVT_VAL]], i64* [[EVT_ADDR]], align 8 // CHECK: [[DATA:%.+]] = bitcast i8* [[ALLOC]] to [[PRIVATES_TY]]* // CHECK: [[D_ADDR_CAST:%.+]] = bitcast i8** [[D_ADDR]] to %struct.kmp_depend_info** // CHECK: [[D_DEP:%.+]] = load %struct.kmp_depend_info*, %struct.kmp_depend_info** [[D_ADDR_CAST]], align 8 // CHECK: [[D_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP]], i{{.+}} -1 // CHECK: [[D_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[D_DEP_BASE]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE1:%.+]] = load i64, i64* [[D_DEP_BASE_SIZE]], align 8 // CHECK-DAG: store i64 0, i64* [[DEPOBJ_SIZE_ADDR]], align 8 // CHECK: [[SZ:%.+]] = load i64, i64* [[DEPOBJ_SIZE_ADDR]], align 8 // CHECK: [[SIZE:%.+]] = add nuw i64 [[SZ]], [[SIZE1]] // CHECK: store i64 [[SIZE]], i64* [[DEPOBJ_SIZE_ADDR]], align 8 // CHECK: [[X_ADDR_CAST:%.+]] = bitcast i8** [[X_ADDR]] to %struct.kmp_depend_info** // CHECK: [[X_DEP:%.+]] = load %struct.kmp_depend_info*, %struct.kmp_depend_info** [[X_ADDR_CAST]], align 8 // CHECK: [[X_DEP_BASE:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP]], i{{.+}} -1 // CHECK: [[X_DEP_BASE_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[X_DEP_BASE]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE2:%.+]] = load i64, i64* [[X_DEP_BASE_SIZE]], align 8 // CHECK-DAG: store i64 0, i64* [[DEPOBJ_SIZE_ADDR1]], align 8 // CHECK: [[SZ:%.+]] = load i64, i64* 
[[DEPOBJ_SIZE_ADDR1]], align 8 // CHECK: [[SIZE3:%.+]] = add nuw i64 [[SZ]], [[SIZE2]] // CHECK: store i64 [[SIZE3]], i64* [[DEPOBJ_SIZE_ADDR1]], align 8 // CHECK: [[SZ:%.+]] = load i64, i64* [[DEPOBJ_SIZE_ADDR]], align 8 // CHECK: [[SZ1:%.+]] = load i64, i64* [[DEPOBJ_SIZE_ADDR1]], align 8 // CHECK: [[SIZE1:%.+]] = add nuw i64 0, [[SZ]] // CHECK: [[SIZE2:%.+]] = add nuw i64 [[SIZE1]], [[SZ1]] // CHECK: [[SIZE:%.+]] = add nuw i64 [[SIZE2]], 2 // CHECK: [[SV:%.+]] = call i8* @llvm.stacksave() // CHECK: store i8* [[SV]], i8** [[SV_ADDR:%.+]], align 8 // CHECK: [[VLA:%.+]] = alloca %struct.kmp_depend_info, i64 [[SIZE]], // CHECK: [[SIZE32:%.+]] = trunc i64 [[SIZE]] to i32 // CHECK: [[VLA0:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 0 // CHECK: [[BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[A_ADDR_CAST:%.+]] = ptrtoint i32* [[A_ADDR]] to i64 // CHECK: store i64 [[A_ADDR_CAST]], i64* [[BASE_ADDR]], align 16 // CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 1 // CHECK: store i64 4, i64* [[SIZE_ADDR]], align 8 // CHECK: [[FLAGS_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA0]], i{{.+}} 0, i{{.+}} 2 // CHECK: store i8 1, i8* [[FLAGS_ADDR]], align 1 // CHECK: [[A:%.+]] = load i32, i32* [[A_ADDR]], align 4 // CHECK: [[A_CAST:%.+]] = sext i32 [[A]] to i64 // CHECK: [[SZ1:%.+]] = mul nuw i64 24, [[A_CAST]] // CHECK: [[A:%.+]] = load i32, i32* [[A_ADDR]], align 4 // CHECK: [[A_CAST:%.+]] = sext i32 [[A]] to i64 // CHECK: [[SZ:%.+]] = mul nuw i64 [[SZ1]], [[A_CAST]] // CHECK: [[VLA1:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 1 // CHECK: [[BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA1]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[B_ADDR_CAST:%.+]] = ptrtoint i32** %{{.+}} to i64 // CHECK: store i64 [[B_ADDR_CAST]], i64* [[BASE_ADDR]], align 8 // CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA1]], i{{.+}} 0, i{{.+}} 1 // CHECK: store i64 [[SZ]], i64* [[SIZE_ADDR]], align 8 // CHECK: [[FLAGS_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA1]], i{{.+}} 0, i{{.+}} 2 // CHECK: store i8 1, i8* [[FLAGS_ADDR]], align 8 // CHECK: store i64 2, i64* [[DEP_COUNTER_ADDR]], align 8 // CHECK: [[D_ADDR_CAST:%.+]] = bitcast i8** [[D_ADDR]] to %struct.kmp_depend_info** // CHECK: [[BC:%.+]] = load %struct.kmp_depend_info*, %struct.kmp_depend_info** [[D_ADDR_CAST]], align 8 // CHECK: [[PREV:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[BC]], i64 -1 // CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[PREV]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE:%.+]] = load i64, i64* [[SIZE_ADDR]], align 8 // CHECK: [[BYTES:%.+]] = mul nuw i64 24, [[SIZE]] // CHECK: [[POS:%.+]] = load i64, i64* [[DEP_COUNTER_ADDR]], align 8 // CHECK: [[VLA_D:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 [[POS]] // CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_D]] to i8* // CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[BC]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align {{.+}} [[DEST]], i8* align {{.+}} [[SRC]], i64 [[BYTES]], i1 false) // CHECK: [[ADD:%.+]] = add 
nuw i64 [[POS]], [[SIZE]] // CHECK: store i64 [[ADD]], i64* [[DEP_COUNTER_ADDR]], align 8 // CHECK: [[X_ADDR_CAST:%.+]] = bitcast i8** [[X_ADDR]] to %struct.kmp_depend_info** // CHECK: [[BC:%.+]] = load %struct.kmp_depend_info*, %struct.kmp_depend_info** [[X_ADDR_CAST]], align 8 // CHECK: [[PREV:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[BC]], i64 -1 // CHECK: [[SIZE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[PREV]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[SIZE:%.+]] = load i64, i64* [[SIZE_ADDR]], align 8 // CHECK: [[BYTES:%.+]] = mul nuw i64 24, [[SIZE]] // CHECK: [[POS:%.+]] = load i64, i64* [[DEP_COUNTER_ADDR]], align 8 // CHECK: [[VLA_X:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[VLA]], i64 [[POS]] // CHECK: [[DEST:%.+]] = bitcast %struct.kmp_depend_info* [[VLA_X]] to i8* // CHECK: [[SRC:%.+]] = bitcast %struct.kmp_depend_info* [[BC]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align {{.+}} [[DEST]], i8* align {{.+}} [[SRC]], i64 [[BYTES]], i1 false) // CHECK: [[ADD:%.+]] = add nuw i64 [[POS]], [[SIZE]] // CHECK: store i64 [[ADD]], i64* [[DEP_COUNTER_ADDR]], align 8 // CHECK: [[BC:%.+]] = bitcast %struct.kmp_depend_info* [[VLA]] to i8* // CHECK: call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* @{{.+}}, i32 [[GTID]], i8* [[ALLOC]], i32 [[SIZE32]], i8* [[BC]], i32 0, i8* null) // CHECK: [[SV:%.+]] = load i8*, i8** [[SV_ADDR]], align 8 // CHECK: call void @llvm.stackrestore(i8* [[SV]]) #pragma omp task depend(in: a, ([3][a][a])&b) depend(depobj: d, x) detach(evt) { #pragma omp taskgroup { #pragma omp task foo(); } } // CHECK: ret i32 0 return 0; } // CHECK: call void @__kmpc_taskgroup( // CHECK: call i8* @__kmpc_omp_task_alloc( // CHECK: call i32 @__kmpc_omp_task( // CHECK: call void @__kmpc_end_taskgroup( // CHECK-LABEL: @bar void bar(void) { int **a; // CHECK: call void @__kmpc_for_static_init_4( #pragma omp for for (int i = 0; i < 10; ++i) // CHECK: [[BUF:%.+]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i32 1, i64 48, // CHECK: [[BC_BUF:%.+]] = bitcast i8* [[BUF]] to [[TT_WITH_PRIVS:%.+]]* // CHECK: [[PRIVS:%.+]] = getelementptr inbounds [[TT_WITH_PRIVS]], [[TT_WITH_PRIVS]]* [[BC_BUF]], i32 0, i32 1 // CHECK: [[I_PRIV:%.+]] = getelementptr inbounds %{{.+}}, %{{.+}} [[PRIVS]], i32 0, i32 0 // CHECK: [[I:%.+]] = load i32, i32* [[I_ADDR:%.+]], // CHECK: store i32 %{{.+}}, i32* [[I_PRIV]], // NELEMS = 1 * ((i - 0 + 2 - 1) / 2); // CHECK: [[END:%.+]] = load i32, i32* [[I_ADDR]], // CHECK: [[EB_SUB:%.+]] = sub i32 [[END]], 0 // CHECK: [[EB_SUB_2_ADD:%.+]] = add i32 [[EB_SUB]], 2 // CHECK: [[EB_SUB_2_ADD_1_SUB:%.+]] = sub i32 [[EB_SUB_2_ADD]], 1 // CHECK: [[EB_SUB_2_ADD_1_SUB_2_DIV:%.+]] = udiv i32 [[EB_SUB_2_ADD_1_SUB]], 2 // CHECK: [[ELEMS:%.+]] = zext i32 [[EB_SUB_2_ADD_1_SUB_2_DIV]] to i64 // CHECK: [[NELEMS:%.+]] = mul nuw i64 [[ELEMS]], 1 // ITERATOR_TOTAL = NELEMS + 0; // CHECK: [[ITERATOR_TOTAL:%.+]] = add nuw i64 0, [[NELEMS]] // NELEMS = ITERATOR_TOTAL + non-iterator-deps (=0) // CHECK: [[TOTAL:%.+]] = add nuw i64 [[ITERATOR_TOTAL]], 0 // %struct.kmp_depend_info DEPS[TOTAL]; // CHECK: [[DEPS:%.+]] = alloca %struct.kmp_depend_info, i64 [[TOTAL]], // CHECK: [[NDEPS:%.+]] = trunc i64 [[TOTAL]] to i32 // i64 DEP_COUNTER = 0; // CHECK: store i64 0, i64* [[DEP_COUNTER_ADDR:%.+]], // NELEMS = ((i - 0 + 2 - 1) / 2); // CHECK: [[END:%.+]] = load i32, i32* [[I_ADDR]], // CHECK: [[EB_SUB:%.+]] = sub i32 [[END]], 0 // CHECK: [[EB_SUB_2_ADD:%.+]] =
add i32 [[EB_SUB]], 2 // CHECK: [[EB_SUB_2_ADD_1_SUB:%.+]] = sub i32 [[EB_SUB_2_ADD]], 1 // CHECK: [[ELEMS:%.+]] = udiv i32 [[EB_SUB_2_ADD_1_SUB]], 2 // i32 COUNTER = 0; // CHECK: store i32 0, i32* [[COUNTER_ADDR:%.+]], // CHECK: br label %[[CONT:.+]] // Loop. // CHECK: [[CONT]]: // CHECK: [[COUNTER:%.+]] = load i32, i32* [[COUNTER_ADDR]], // CHECK: [[CMP:%.+]] = icmp ult i32 [[COUNTER]], [[ELEMS]] // CHECK: br i1 [[CMP]], label %[[BODY:.+]], label %[[EXIT:.+]] // CHECK: [[BODY]]: // k = 0 + 2*COUNTER; // CHECK: [[COUNTER:%.+]] = load i32, i32* [[COUNTER_ADDR]], // CHECK: [[C2_MUL:%.+]] = mul i32 [[COUNTER]], 2 // CHECK: [[C2_MUL_0_ADD:%.+]] = add i32 0, [[C2_MUL]] // CHECK: store i32 [[C2_MUL_0_ADD]], i32* [[K_ADDR:%.+]], // &a[k][i] // CHECK: [[A:%.+]] = load i32**, i32*** [[A_ADDR:%.+]], // CHECK: [[K:%.+]] = load i32, i32* [[K_ADDR]], // CHECK: [[IDX:%.+]] = zext i32 [[K]] to i64 // CHECK: [[AK_ADDR:%.+]] = getelementptr inbounds i32*, i32** [[A]], i64 [[IDX]] // CHECK: [[AK:%.+]] = load i32*, i32** [[AK_ADDR]], // CHECK: [[I:%.+]] = load i32, i32* [[I_ADDR]], // CHECK: [[IDX:%.+]] = sext i32 [[I]] to i64 // CHECK: [[AKI_ADDR:%.+]] = getelementptr inbounds i32, i32* [[AK]], i64 [[IDX]] // DEPS[DEP_COUNTER].base_addr = &a[k][i]; // CHECK: [[DEP_COUNTER:%.+]] = load i64, i64* [[DEP_COUNTER_ADDR]], // CHECK: [[DEPS_DC:%.+]] = getelementptr %struct.kmp_depend_info, %struct.kmp_depend_info* [[DEPS]], i64 [[DEP_COUNTER]] // CHECK: [[DEPS_DC_BASE_ADDR:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[DEPS_DC]], i{{.+}} 0, i{{.+}} 0 // CHECK: [[AKI_INT:%.+]] = ptrtoint i32* [[AKI_ADDR]] to i64 // CHECK: store i64 [[AKI_INT]], i64* [[DEPS_DC_BASE_ADDR]], // DEPS[DEP_COUNTER].size = sizeof(a[k][i]); // CHECK: [[DEPS_DC_SIZE:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[DEPS_DC]], i{{.+}} 0, i{{.+}} 1 // CHECK: store i64 4, i64* [[DEPS_DC_SIZE]], // DEPS[DEP_COUNTER].flags = in; // CHECK: [[DEPS_DC_FLAGS:%.+]] = getelementptr inbounds %struct.kmp_depend_info, %struct.kmp_depend_info* [[DEPS_DC]], i{{.+}} 0, i{{.+}} 2 // CHECK: store i8 1, i8* [[DEPS_DC_FLAGS]], // DEP_COUNTER = DEP_COUNTER + 1; // CHECK: [[DEP_COUNTER:%.+]] = load i64, i64* [[DEP_COUNTER_ADDR]], // CHECK: [[INC:%.+]] = add nuw i64 [[DEP_COUNTER]], 1 // CHECK: store i64 [[INC]], i64* [[DEP_COUNTER_ADDR]], // COUNTER = COUNTER + 1; // CHECK: [[COUNTER:%.+]] = load i32, i32* [[COUNTER_ADDR]], // CHECK: [[INC:%.+]] = add i32 [[COUNTER]], 1 // CHECK: store i32 [[INC]], i32* [[COUNTER_ADDR]], // CHECK: br label %[[CONT]] // CHECK: [[EXIT]]: // CHECK: [[DEP_BEGIN:%.+]] = bitcast %struct.kmp_depend_info* [[DEPS]] to i8* // CHECK: = call i32 @__kmpc_omp_task_with_deps(%struct.ident_t* @{{.+}}, i32 %{{.+}}, i8* [[BUF]], i32 [[NDEPS]], i8* [[DEP_BEGIN]], i32 0, i8* null) #pragma omp task depend(iterator(unsigned k=0:i:2), in: a[k][i]) ++i; } #endif
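// Editor's note -- illustrative sketch, not part of task_codegen.c: the
// iterator expansion that the CHECK lines above describe can be written out
// by hand.  mock_depend_info is a hypothetical stand-in for the runtime's
// kmp_depend_info; only the trip-count formula (end - begin + step - 1) / step
// and the k = begin + step * counter enumeration mirror the test comments.
#include <stdio.h>
#include <stdint.h>

struct mock_depend_info { uintptr_t base_addr; uint64_t len; unsigned char flags; };

// Trip count of iterator(k = begin:end:step), as computed in the checked IR.
static unsigned trip_count(unsigned begin, unsigned end, unsigned step) {
  return (end - begin + step - 1) / step;
}

int main(void) {
  int i = 7;
  static int a[8][8];
  unsigned nelems = trip_count(0, (unsigned)i, 2); // (7 - 0 + 2 - 1) / 2 = 4
  struct mock_depend_info deps[16];
  uint64_t dep_counter = 0;
  for (unsigned counter = 0; counter < nelems; ++counter) {
    unsigned k = 0 + 2 * counter;                      // k = begin + step * counter
    deps[dep_counter].base_addr = (uintptr_t)&a[k][i]; // DEPS[n].base_addr = &a[k][i]
    deps[dep_counter].len = sizeof(a[k][i]);           // DEPS[n].size = 4
    deps[dep_counter].flags = 1;                       // flags = 1 encodes an "in" dep
    ++dep_counter;
  }
  // Four dependences are recorded: &a[0][7], &a[2][7], &a[4][7], &a[6][7].
  printf("%llu dependences recorded\n", (unsigned long long)dep_counter);
  return 0;
}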
nodal_residualbased_block_builder_and_solver.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER ) #define KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* #include <unordered_set> */ /* #ifdef USE_GOOGLE_HASH */ /* #include "sparsehash/dense_hash_set" //included in external libraries */ /* #endif */ #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "includes/kratos_flags.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedBlockBuilderAndSolver * @ingroup KratosCore * @brief This class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual). * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similar to the calculation of the total residual * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedBlockBuilderAndSolver : public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver > { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedBlockBuilderAndSolver); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedBlockBuilderAndSolver( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver) { } /** Destructor.
*/ ~NodalResidualBasedBlockBuilderAndSolver() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Function to perform the build of the RHS. The vector could be sized as the total number * of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param b The RHS vector */ void Build( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& b) override { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; // Getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements double start_build = OpenMPUtils::GetCurrentTime(); #pragma omp parallel firstprivate(nelements,nconditions, LHS_Contribution, RHS_Contribution, EquationId ) { # pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } //#pragma omp parallel for firstprivate(nconditions, LHS_Contribution, RHS_Contribution, EquationId ) schedule(dynamic, 1024) #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; k++) { ModelPart::ConditionsContainerType::iterator it = cond_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->Condition_CalculateSystemContributions(*(it.base()), LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*(it.base())); } } } const double stop_build = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; //for (int i = 0; i < A_size; i++) // omp_destroy_lock(&lock_array[i]); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; KRATOS_CATCH("") } void BuildNodally( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; std::cout<<"Build Nodally Continuity Equation"<<std::endl; //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); /* const double start_build = OpenMPUtils::GetCurrentTime(); */ /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if(neighSize>1){ const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double nodalVolume=itNode->FastGetSolutionStepValue(NODAL_VOLUME); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; LHS_Contribution= ZeroMatrix(neighSize,neighSize); RHS_Contribution= ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double volumetricCoeff=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)+2.0*deviatoricCoeff/3.0; const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); double deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1); double volumetricDefRate= itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE); LHS_Contribution(0,0)+= nodalVolume/volumetricCoeff; RHS_Contribution[0] += (-deltaPressure/volumetricCoeff + volumetricDefRate)*nodalVolume; bool stabilizationNeeded=false; if((itNode->Is(FLUID) || (itNode->Is(SOLID) && itNode->FastGetSolutionStepValue(POISSON_RATIO)>0.49))){ stabilizationNeeded=true; }else{ for (unsigned int i = 0; i< neighSize; i++) { unsigned int idNode=nodalSFDneighboursId[i]; 
EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId(); } } if(stabilizationNeeded==true){ /* Vector& rNodalSFDneigh = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS); */ unsigned int firstRow=0; unsigned int firstCol=0; double meanMeshSize=itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); double characteristicLength=2.0*meanMeshSize; /* double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); */ double density=itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ double nodalVelocity=0; if(dimension==2){ nodalVelocity= sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y)); }else if(dimension==3){ nodalVelocity=sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y) + itNode->FastGetSolutionStepValue(VELOCITY_Z)*itNode->FastGetSolutionStepValue(VELOCITY_Z)); } double tauStab= 1.0 * (characteristicLength * characteristicLength * timeInterval) / ( density * nodalVelocity * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval ); /* tauStab*=10.0; */ /* tauStab=0.0000001; */ /* tauStab=100.0; */ LHS_Contribution(0,0)+= +nodalVolume*tauStab*density/(volumetricCoeff*timeInterval); RHS_Contribution[0] += -nodalVolume*tauStab*density/(volumetricCoeff*timeInterval)*(deltaPressure-itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY,0)*timeInterval); if(itNode->Is(FREE_SURFACE)){ /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* double boundLHScontribution=4.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* std::cout<<"boundLHScontribution "<<boundLHScontribution<<std::endl; */ /* if(itNode->IsNot(RIGID)){ */ LHS_Contribution(0,0) += + 4.0*2.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); RHS_Contribution[0] += - 4.0*2.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); /* } */ /* else { */ /* LHS_Contribution(0,0) += + 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize); */ /* RHS_Contribution[0] += - 4.0/3.0 * tauStab * nodalVolume /(meanMeshSize*meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE,0); */ /* } */ const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector& SpatialDefRate=itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration= 0.5*(itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION,1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration=0; double nodalNormalProjDefRate=0; if(dimension==2){ nodalNormalProjDefRate=Normal[0]*SpatialDefRate[0]*Normal[0] + Normal[1]*SpatialDefRate[1]*Normal[1] + 2*Normal[0]*SpatialDefRate[2]*Normal[1]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + 
Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ /* nodalNormalAcceleration=(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))*Normal[0]/timeInterval + */ /* (itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))*Normal[1]/timeInterval; */ nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1]; }else if(dimension==3){ nodalNormalProjDefRate=Normal[0]*SpatialDefRate[0]*Normal[0] + Normal[1]*SpatialDefRate[1]*Normal[1] + Normal[2]*SpatialDefRate[2]*Normal[2] + 2*Normal[0]*SpatialDefRate[3]*Normal[1] + 2*Normal[0]*SpatialDefRate[4]*Normal[2] + 2*Normal[1]*SpatialDefRate[5]*Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } /* RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; */ double accelerationContribution=2.0*density*nodalNormalAcceleration/meanMeshSize; double deviatoricContribution=8.0*deviatoricCoeff*nodalNormalProjDefRate/(meanMeshSize*meanMeshSize); /* std::cout<<"nodalNormalAcceleration= "<<nodalNormalAcceleration<<std::endl; */ /* std::cout<<"nodalNormalProjDefRate= "<<nodalNormalProjDefRate<<std::endl; */ /* std::cout<<"meanMeshSize "<<meanMeshSize<<std::endl; */ /* accelerationContribution=0; */ /* deviatoricContribution=0; */ /* if(itNode->IsNot(RIGID)){ */ RHS_Contribution[0] += 2.0* tauStab * (accelerationContribution + deviatoricContribution) * nodalVolume; /* }else{ */ /* RHS_Contribution[0] += 1.0/3.0* tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; */ /* } */ } for (unsigned int i = 0; i< neighSize; i++) { unsigned int idNode=nodalSFDneighboursId[i]; EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId(); double Density= rModelPart.Nodes()[idNode].FastGetSolutionStepValue(DENSITY); array_1d<double, 3 >& VolumeAcceleration = rModelPart.Nodes()[idNode].FastGetSolutionStepValue(VOLUME_ACCELERATION); double dNdXi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; double dNdYi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+1]; double dNdZi=0; if(dimension==2){ RHS_Contribution[i] += - tauStab * Density * (dNdXi* VolumeAcceleration[0] + dNdYi* VolumeAcceleration[1]) * nodalVolume; } else if(dimension==3){ dNdZi=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol+2]; RHS_Contribution[i] += - tauStab * Density * (dNdXi* VolumeAcceleration[0] + dNdYi* VolumeAcceleration[1] + dNdZi* VolumeAcceleration[2]) * nodalVolume; } firstRow=0; for (unsigned int j = 0; j< neighSize; j++) { unsigned int idNodeJ=nodalSFDneighboursId[j]; double dNdXj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; double dNdYj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+1]; if(dimension==2){ ////////////////// Laplacian term for LHS LHS_Contribution(i,j)+= + tauStab * (dNdXi*dNdXj + dNdYi*dNdYj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += - tauStab * (dNdXi*dNdXj + dNdYi*dNdYj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE,0); } else if(dimension==3){ double 
dNdZj=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow+2]; ////////////////// Laplacian term for LHS LHS_Contribution(i,j) += + tauStab * (dNdXi*dNdXj + dNdYi*dNdYj + dNdZi*dNdZj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += - tauStab * (dNdXi*dNdXj + dNdYi*dNdYj + dNdZi*dNdZj) * nodalVolume * rModelPart.Nodes()[idNodeJ].FastGetSolutionStepValue(PRESSURE,0); } /* std::cout << "dNdXi= " <<dNdXi<< "dNdYi= " <<dNdYi<< "dNdYj= " <<dNdYj<< "dNdXj= " <<dNdXj<< std::endl; */ firstRow+=dimension; } firstCol+=dimension; } } //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif /* AssembleLHS(A, LHS_Contribution, EquationId); */ /* AssembleRHS(b, RHS_Contribution, EquationId); */ } } } /* /\* std::cout<<".... Build Nodally Continuity Equation DONE!"<<std::endl; *\/ */ /* const double stop_build = OpenMPUtils::GetCurrentTime(); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << stop_build - start_build << std::endl; */ /* //for (int i = 0; i < A_size; i++) */ /* // omp_destroy_lock(&lock_array[i]); */ /* KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl; */ KRATOS_CATCH("") } /** * @brief Function to perform the building of the LHS * @details Depending on the implementation choosen the size of the matrix could * be equal to the total number of Dofs or to the number of unrestrained dofs * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A) override { KRATOS_TRY TSystemVectorType tmp(A.size1(), 0.0); this->Build(pScheme, rModelPart, A, tmp); KRATOS_CATCH("") } /** * @brief Build a rectangular matrix of size n*N where "n" is the number of unrestrained degrees of freedom * and "N" is the total number of degrees of freedom involved. * @details This matrix is obtained by building the total matrix without the lines corresponding to the fixed * degrees of freedom (but keeping the columns!!) 
* @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix */ void BuildLHS_CompleteOnFreeRows( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A) override { KRATOS_TRY TSystemVectorType tmp(A.size1(), 0.0); this->Build(pScheme, rModelPart, A, tmp); KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); //prints information about the current time KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, ModelPart& rModelPart ) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if(BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded() ) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING("NodalResidualBasedBlockBuilderAndSolver") << "ATTENTION! setting the RHS to zero!" << std::endl; } //prints information about the current time KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safest function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY std::cout << "CONTINUITY EQ: buildAndSolve " << std::endl; Timer::Start("Build"); /* Build(pScheme, rModelPart, A, b); */ boost::timer build_time; BuildNodally(pScheme, rModelPart, A, b); std::cout << "CONTINUITY EQ: build_time : " << build_time.elapsed() << std::endl; Timer::Stop("Build"); ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; const double start_solve = OpenMPUtils::GetCurrentTime(); Timer::Start("Solve"); boost::timer solve_time; SystemSolveWithPhysics(A, Dx, b, rModelPart); std::cout << "CONTINUITY EQ: solve_time : " << solve_time.elapsed() << std::endl; Timer::Stop("Solve"); const double stop_solve = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >=1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } /** * @brief Corresponds to the previous function, but the system matrix is considered already built and only the RHS is built again * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildRHSAndSolve( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY BuildRHS(pScheme, rModelPart, b); SystemSolve(A, Dx, b); KRATOS_CATCH("") } /** * @brief Function to perform the build of the RHS. * @details The vector could be sized as the total number of dofs or as the number of unrestrained ones * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void BuildRHS( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) override { KRATOS_TRY BuildRHSNoDirichlet(pScheme,rModelPart,b); const int ndofs = static_cast<int>(BaseType::mDofSet.size()); //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver #pragma omp parallel for firstprivate(ndofs) for (int k = 0; k<ndofs; k++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k; const std::size_t i = dof_iterator->EquationId(); if (dof_iterator->IsFixed()) b[i] = 0.0f; } KRATOS_CATCH("") } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart ) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType& pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef google::dense_hash_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set < NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif std::vector<set_type> dofs_aux_list(nthreads); /* std::vector<allocator_type> allocators(nthreads); */ KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of threads: " << nthreads << "\n" << std::endl; for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl; #pragma omp parallel firstprivate(nelements, ElementalDofList) { #pragma omp for schedule(guided, 512) nowait for (int i = 0; i < nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing condition loop" << std::endl; ConditionsArrayType& pConditions = rModelPart.Conditions(); const int nconditions = static_cast<int>(pConditions.size()); #pragma omp for schedule(guided, 512) for (int i = 0; i < nconditions; i++) { typename ConditionsArrayType::iterator it = pConditions.begin() + i; const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // gets list of Dof involved on every element pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing tree reduction\n" << std::endl; // Here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5*static_cast<double>(old_max)); while (new_max>=1 && new_max != old_max) { if( this->GetEchoLevel() > 2) { //just for debugging std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; for (int i = 0;
i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { std::cout << i << " - " << i+new_max << std::endl; } } std::cout << "********************" << std::endl; } #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i+new_max].begin(), dofs_aux_list[i+new_max].end()); dofs_aux_list[i+new_max].clear(); } } old_max = new_max; new_max = ceil(0.5*static_cast<double>(old_max)); } KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl; DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it= dofs_aux_list[0].begin(); it!= dofs_aux_list[0].end(); it++) { Doftemp.push_back( it->get() ); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; //Throws an exception if there are no Degrees Of Freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl; KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "Initializing lock array" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_destroy_lock(&mlock_array[i]); } } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) { omp_init_lock(&mlock_array[i]); } #endif KRATOS_INFO_IF("NodalResidualBasedBlockBuilderAndSolver", ( this->GetEchoLevel() > 2)) << "End of setup dof set\n" << std::endl; // If reactions are to be calculated, we check if all the dofs have reactions defined // This is to be done only in debug mode #ifdef KRATOS_DEBUG if(BaseType::GetCalculateReactionsFlag()) { for(auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " <<std::endl << "Node : "<<dof_iterator->Id()<< std::endl << "Dof : "<<(*dof_iterator)<<std::endl<<"Not possible to calculate reactions."<<std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart& rModelPart ) override { //int free_id = 0; BaseType::mEquationSystemSize = BaseType::mDofSet.size(); int ndofs = static_cast<int>(BaseType::mDofSet.size()); #pragma omp parallel for firstprivate(ndofs) for (int i = 0; i < static_cast<int>(ndofs); i++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + i; dof_iterator->SetEquationId(i); } //for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) // dof_iterator->SetEquationId(free_id++); } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx, TSystemVectorPointerType& pb, ModelPart& rModelPart ) override { KRATOS_TRY boost::timer construct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } TSystemMatrixType& A = *pA; TSystemVectorType& Dx = *pDx; TSystemVectorType& b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructure(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH(" it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR <<"The equation system size has changed during the simulation. This is not permitted."<<std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructure(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); std::cout << "CONTINUITY EQ: construct_matrix : " << construct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { KRATOS_TRY std::cout << "Initialize Solution Step in nodal res based " << std::endl; KRATOS_CATCH("") } //************************************************************************** //************************************************************************** void FinalizeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { } //************************************************************************** //************************************************************************** void CalculateReactions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { TSparseSpace::SetToZero(b); //refresh RHS to have the correct reactions BuildRHSNoDirichlet(pScheme, rModelPart, b); const int ndofs = static_cast<int>(BaseType::mDofSet.size()); //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver #pragma omp parallel for firstprivate(ndofs) for (int k = 0; k<ndofs; k++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k; const int i = (dof_iterator)->EquationId(); (dof_iterator)->GetSolutionStepReactionValue() = -b[i]; } //KRATOS_WATCH(__LINE__) } /** * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely * inexpensive depending on the implementation chosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) override { std::size_t system_size = A.size1(); std::vector<double> scaling_factors (system_size, 0.0f); const int ndofs = static_cast<int>(BaseType::mDofSet.size()); //NOTE: dofs are assumed to be numbered consecutively in the BlockBuilderAndSolver #pragma omp parallel for firstprivate(ndofs) for(int k = 0; k<ndofs; k++) { typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin() + k; if(dof_iterator->IsFixed()) scaling_factors[k] = 0.0f; else scaling_factors[k] = 1.0f; } double* Avalues = A.value_data().begin(); std::size_t* Arow_indices = A.index1_data().begin(); std::size_t* Acol_indices = A.index2_data().begin(); //detect if there is a line of all zeros and set the diagonal to a 1 if this happens #pragma omp parallel for firstprivate(system_size) for(int k = 0; k < static_cast<int>(system_size); ++k) { std::size_t col_begin = Arow_indices[k]; std::size_t col_end = Arow_indices[k+1]; bool empty = true; for (std::size_t j = col_begin; j < col_end; ++j) { if(Avalues[j] != 0.0) { empty = false; break; } } if(empty == true) { A(k,k) = 1.0; b[k] = 0.0; } } #pragma omp parallel for for (int k = 0; k < static_cast<int>(system_size); ++k) { std::size_t col_begin = Arow_indices[k]; std::size_t col_end = Arow_indices[k+1]; double k_factor = scaling_factors[k]; if (k_factor == 0) { // zero out the whole row, except the diagonal for (std::size_t j = col_begin; j < col_end; ++j) if (static_cast<int>(Acol_indices[j]) != k ) Avalues[j] = 0.0; // zero out the RHS b[k] = 0.0; } else { // zero out the column which is associated with the zero'ed row for (std::size_t j = col_begin; j < col_end; ++j) if(scaling_factors[ Acol_indices[j] ] == 0 ) Avalues[j] = 0.0; } } } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { #ifdef _OPENMP for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); mlock_array.resize(0); #endif BaseType::Clear(); } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. 
* @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart& rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ #ifdef _OPENMP std::vector< omp_lock_t > mlock_array; #endif ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType& A, ModelPart& rModelPart) { std::cout<<" ConstructMatrixStructure for Continuity equation "<<std::endl; //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); const std::size_t equation_size = BaseType::mEquationSystemSize; #ifdef USE_GOOGLE_HASH std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); const std::size_t empty_key = 2*equation_size + 10; #else std::vector<std::unordered_set<std::size_t> > indices(equation_size); #endif #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { #ifdef USE_GOOGLE_HASH indices[iii].set_empty_key(empty_key); #else indices[iii].reserve(40); #endif } Element::EquationIdVectorType EquationId; /* #pragma omp parallel */ /* { */ ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { /* VectorType nodalSFDneighboursId=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); */ /* const unsigned int neighSize = nodalSFDneighboursId.size(); */ const unsigned int neighSize =itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER).size(); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); /* const unsigned int xpos = itNode->GetDofPosition(VELOCITY_X); */ const unsigned int xpos = itNode->GetDofPosition(PRESSURE); for (unsigned int i = 0; i< neighSize; i++) { /* unsigned int idNode=nodalSFDneighboursId[i]; */ unsigned int idNode=itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER)[i]; EquationId[i]=rModelPart.Nodes()[idNode].GetDof(PRESSURE,xpos).EquationId(); } for (std::size_t i = 0; i < EquationId.size(); i++) { if (EquationId[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[EquationId[i]]); #endif auto& row_indices = indices[EquationId[i]]; for (auto it = EquationId.begin(); it != EquationId.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[EquationId[i]]); #endif } } /* for (std::size_t i = 0; i < EquationId.size(); i++) */ /* { */ /* #ifdef _OPENMP */ /* omp_set_lock(&mlock_array[EquationId[i]]); */ /* #endif */ /* auto& row_indices = indices[EquationId[i]]; */ /* row_indices.insert(EquationId.begin(), EquationId.end()); */ /* #ifdef _OPENMP */ /* omp_unset_lock(&mlock_array[EquationId[i]]); */ /* #endif */ /* } */ } /* } */ Element::EquationIdVectorType ids(3, 0); #pragma omp parallel for firstprivate(nconditions, ids) for (int iii = 0; iii<nconditions; iii++) { typename 
ConditionsArrayType::iterator i_condition = cond_begin + iii; pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo); for (std::size_t i = 0; i < ids.size(); i++) { #ifdef _OPENMP omp_set_lock(&mlock_array[ids[i]]); #endif auto& row_indices = indices[ids[i]]; row_indices.insert(ids.begin(), ids.end()); #ifdef _OPENMP omp_unset_lock(&mlock_array[ids[i]]); #endif } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double* Avalues = A.value_data().begin(); std::size_t* Arow_indices = A.index1_data().begin(); std::size_t* Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i+1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i+1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } indices[i].clear(); //deallocating the memory std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size()+1, nnz); Timer::Stop("MatrixStructure"); /* std::cout<<"..... ConstructMatrixStructure for Continuity equation DONE"<<std::endl; */ } /* virtual void ConstructMatrixStructure( */ /* typename TSchemeType::Pointer pScheme, */ /* TSystemMatrixType& A, */ /* ModelPart& rModelPart) */ /* { */ /* //filling with zero the matrix (creating the structure) */ /* Timer::Start("MatrixStructure"); */ /* // Getting the elements from the model */ /* const int nelements = static_cast<int>(rModelPart.Elements().size()); */ /* // Getting the array of the conditions */ /* const int nconditions = static_cast<int>(rModelPart.Conditions().size()); */ /* ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); */ /* ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); */ /* ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); */ /* const std::size_t equation_size = BaseType::mEquationSystemSize; */ /* #ifdef USE_GOOGLE_HASH */ /* std::vector<google::dense_hash_set<std::size_t> > indices(equation_size); */ /* const std::size_t empty_key = 2*equation_size + 10; */ /* #else */ /* std::vector<std::unordered_set<std::size_t> > indices(equation_size); */ /* #endif */ /* #pragma omp parallel for firstprivate(equation_size) */ /* for (int iii = 0; iii < static_cast<int>(equation_size); iii++) */ /* { */ /* #ifdef USE_GOOGLE_HASH */ /* indices[iii].set_empty_key(empty_key); */ /* #else */ /* indices[iii].reserve(40); */ /* #endif */ /* } */ /* Element::EquationIdVectorType ids(3, 0); */ /* #pragma omp parallel for firstprivate(nelements, ids) */ /* for(int iii=0; iii<nelements; iii++) */ /* { */ /* typename ElementsContainerType::iterator i_element = el_begin + iii; */ /* pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo); */ /* for (std::size_t i = 0; i < ids.size(); i++) */ /* { */ /* #ifdef _OPENMP */ /* omp_set_lock(&mlock_array[ids[i]]); */ /* #endif */ /* auto& row_indices = indices[ids[i]]; */ /* row_indices.insert(ids.begin(), ids.end()); */ /* #ifdef _OPENMP */ /* omp_unset_lock(&mlock_array[ids[i]]); */ /* #endif */ /* } */ /* } */ /* #pragma 
omp parallel for firstprivate(nconditions, ids) */ /* for (int iii = 0; iii<nconditions; iii++) */ /* { */ /* typename ConditionsArrayType::iterator i_condition = cond_begin + iii; */ /* pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo); */ /* for (std::size_t i = 0; i < ids.size(); i++) */ /* { */ /* #ifdef _OPENMP */ /* omp_set_lock(&mlock_array[ids[i]]); */ /* #endif */ /* auto& row_indices = indices[ids[i]]; */ /* row_indices.insert(ids.begin(), ids.end()); */ /* #ifdef _OPENMP */ /* omp_unset_lock(&mlock_array[ids[i]]); */ /* #endif */ /* } */ /* } */ /* //count the row sizes */ /* unsigned int nnz = 0; */ /* for (unsigned int i = 0; i < indices.size(); i++) */ /* nnz += indices[i].size(); */ /* A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); */ /* double* Avalues = A.value_data().begin(); */ /* std::size_t* Arow_indices = A.index1_data().begin(); */ /* std::size_t* Acol_indices = A.index2_data().begin(); */ /* //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! */ /* Arow_indices[0] = 0; */ /* for (int i = 0; i < static_cast<int>(A.size1()); i++) */ /* Arow_indices[i+1] = Arow_indices[i] + indices[i].size(); */ /* #pragma omp parallel for */ /* for (int i = 0; i < static_cast<int>(A.size1()); i++) */ /* { */ /* const unsigned int row_begin = Arow_indices[i]; */ /* const unsigned int row_end = Arow_indices[i+1]; */ /* unsigned int k = row_begin; */ /* for (auto it = indices[i].begin(); it != indices[i].end(); it++) */ /* { */ /* Acol_indices[k] = *it; */ /* Avalues[k] = 0.0; */ /* k++; */ /* } */ /* indices[i].clear(); //deallocating the memory */ /* std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); */ /* } */ /* A.set_filled(indices.size()+1, nnz); */ /* Timer::Stop("MatrixStructure"); */ /* } */ //************************************************************************** void AssembleLHS( TSystemMatrixType& A, LocalSystemMatrixType& LHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } void Assemble( TSystemMatrixType& A, TSystemVectorType& b, const LocalSystemMatrixType& LHS_Contribution, const LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId #ifdef USE_LOCKS_IN_ASSEMBLY ,std::vector< omp_lock_t >& lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&lock_array[i_global]); b[i_global] += RHS_Contribution(i_local); #else double& r_a = b[i_global]; const double& v_a = RHS_Contribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContribution(A, LHS_Contribution, i_global, i_local, EquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&lock_array[i_global]); #endif //note that computation of reactions is not performed here! 
} } //************************************************************************** void AssembleRHS( TSystemVectorType& b, LocalSystemVectorType& RHS_Contribution, Element::EquationIdVectorType& EquationId ) { unsigned int local_size = RHS_Contribution.size(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; // ASSEMBLING THE SYSTEM VECTOR double& b_value = b[i_global]; const double& rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void BuildRHSNoDirichlet( typename TSchemeType::Pointer pScheme, ModelPart& rModelPart, TSystemVectorType& b) { KRATOS_TRY //Getting the Elements ElementsArrayType& pElements = rModelPart.Elements(); //getting the array of the conditions ConditionsArrayType& ConditionsArray = rModelPart.Conditions(); ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // assemble all elements //for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it) const int nelements = static_cast<int>(pElements.size()); #pragma omp parallel firstprivate(nelements, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for(int i=0; i<nelements; i++) { typename ElementsArrayType::iterator it = pElements.begin() + i; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool element_is_active = true; if( (it)->IsDefined(ACTIVE) ) element_is_active = (it)->Is(ACTIVE); if(element_is_active) { //calculate elemental Right Hand Side Contribution pScheme->Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } LHS_Contribution.resize(0, 0, false); RHS_Contribution.resize(0, false); // assemble all conditions //for (typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it) const int nconditions = static_cast<int>(ConditionsArray.size()); //#pragma omp parallel for firstprivate(nconditions, RHS_Contribution, EquationId) schedule(dynamic, 1024) #pragma omp for schedule(guided, 512) for (int i = 0; i<nconditions; i++) { auto it = ConditionsArray.begin() + i; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool condition_is_active = true; if( (it)->IsDefined(ACTIVE) ) condition_is_active = (it)->Is(ACTIVE); if(condition_is_active) { //calculate elemental contribution pScheme->Condition_Calculate_RHS_Contribution(*(it.base()), RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution AssembleRHS(b, RHS_Contribution, EquationId); } } } KRATOS_CATCH("") } //****************************************************************************************** //****************************************************************************************** inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions) { partitions.resize(number_of_threads + 1); int partition_size = number_of_rows / number_of_threads; partitions[0] = 0; partitions[number_of_threads] = number_of_rows; for (unsigned int i = 1; i < number_of_threads; i++) partitions[i] = partitions[i - 1] + partition_size; } inline void AssembleRowContribution(TSystemMatrixType& A, const Matrix& Alocal, const unsigned int i, const unsigned int i_local, Element::EquationIdVectorType& EquationId) { double* values_vector = A.value_data().begin(); std::size_t* index1_vector = A.index1_data().begin(); std::size_t* index2_vector = A.index2_data().begin(); size_t left_limit = index1_vector[i]; // size_t right_limit = index1_vector[i+1]; //find the first entry size_t last_pos = ForwardFind(EquationId[0],left_limit,index2_vector); size_t last_found = EquationId[0]; #ifndef USE_LOCKS_IN_ASSEMBLY double& r_a = values_vector[last_pos]; const double& v_a = Alocal(i_local,0); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += Alocal(i_local,0); #endif //now find all of the other entries size_t pos = 0; for(unsigned int j=1; j<EquationId.size(); j++) { unsigned int id_to_find = EquationId[j]; if(id_to_find > last_found) pos = ForwardFind(id_to_find,last_pos+1,index2_vector); else pos = BackwardFind(id_to_find,last_pos-1,index2_vector); #ifndef USE_LOCKS_IN_ASSEMBLY double& r = values_vector[pos]; const double& v = Alocal(i_local,j); #pragma omp atomic r += v; #else values_vector[pos] += Alocal(i_local,j); #endif last_found = id_to_find; last_pos = pos; } } inline unsigned int ForwardFind(const unsigned int id_to_find, const unsigned int start, const size_t* index_vector) { unsigned int pos = start; while(id_to_find != index_vector[pos]) pos++; return pos; } inline unsigned int BackwardFind(const unsigned int id_to_find, const unsigned int start, const size_t* index_vector) { unsigned int pos = start; while(id_to_find != index_vector[pos]) pos--; return pos; } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedBlockBuilderAndSolver */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_BLOCK_BUILDER_AND_SOLVER defined */
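A compact, standalone sketch of the sorted-row CSR assembly technique the class above implements in Assemble/AssembleRowContribution (plain C arrays stand in for the ublas containers; this is not the Kratos API). Because each CSR row stores its column indices sorted, the first target column is located by a forward scan from the row start, subsequent columns by scanning forward or backward from the last hit, and '#pragma omp atomic' keeps concurrent accumulation safe.

#include <stddef.h>

/* Sketch only: mirrors the ForwardFind/BackwardFind logic used above. */
static void assemble_row_sketch(double *values,           /* CSR values             */
                                const size_t *row_ptr,    /* CSR row offsets        */
                                const size_t *cols,       /* sorted column indices  */
                                size_t row,
                                const size_t *eq_ids,     /* global columns to hit  */
                                const double *local,      /* local row contribution */
                                size_t n_local)
{
    size_t pos = row_ptr[row];
    size_t last, j;
    while (cols[pos] != eq_ids[0]) pos++;                 /* forward find of first entry */
    #pragma omp atomic
    values[pos] += local[0];
    last = eq_ids[0];
    for (j = 1; j < n_local; j++)
    {
        if (eq_ids[j] > last) { while (cols[pos] != eq_ids[j]) pos++; } /* forward  */
        else                  { while (cols[pos] != eq_ids[j]) pos--; } /* backward */
        #pragma omp atomic
        values[pos] += local[j];
        last = eq_ids[j];
    }
}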
a.21.3.c
/* { dg-do compile } */

void work (int);

void
a21_good (int n)
{
  int i;
#pragma omp for ordered
  for (i = 0; i < n; i++)
    {
      if (i <= 10)
        {
#pragma omp ordered
          work (i);
        }
      if (i > 10)
        {
#pragma omp ordered
          work (i + 1);
        }
    }
}
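A hedged usage sketch for the testcase above; the body of work() and the driver are assumptions added for illustration, not part of the original. The pattern is conforming because each iteration enters at most one of the two ordered regions; executing two ordered regions in the same iteration of the same loop would not be.

#include <stdio.h>

void a21_good (int n);
void work (int k) { printf ("%d\n", k); }   /* assumed body, for illustration */

int
main (void)
{
#pragma omp parallel
  a21_good (20);   /* prints 0..10, then 12..20, in iteration order */
  return 0;
}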
8152.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c' as parsed by frontend compiler rose

void kernel_fdtd_2d(int tmax, int nx, int ny, double ex[1000 + 0][1200 + 0], double ey[1000 + 0][1200 + 0], double hz[1000 + 0][1200 + 0], double _fict_[500 + 0]) {
  int t10;
  int t8;
  int t6;
  int t4;
  int t2;
  for (t2 = 0; t2 <= tmax - 1; t2 += 1) {
    for (t4 = 0; t4 <= ny - 1; t4 += 1)
      ey[0][t4] = _fict_[t2];
#pragma omp parallel for
    for (t4 = 1; t4 <= nx - 1; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1)
        for (t8 = 0; t8 <= ny - 1; t8 += 128)
          for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1)
            ey[t6][t10] = ey[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6 - 1][t10]);
#pragma omp parallel for
    for (t4 = 0; t4 <= nx - 1; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < nx - 1 ? t4 + 31 : nx - 1); t6 += 1)
        for (t8 = 1; t8 <= ny - 1; t8 += 128)
          for (t10 = t8; t10 <= (ny - 1 < t8 + 127 ? ny - 1 : t8 + 127); t10 += 1)
            ex[t6][t10] = ex[t6][t10] - 0.5 * (hz[t6][t10] - hz[t6][t10 - 1]);
#pragma omp parallel for
    for (t4 = 0; t4 <= nx - 2; t4 += 32)
      for (t6 = t4; t6 <= (t4 + 31 < nx - 2 ? t4 + 31 : nx - 2); t6 += 1)
        for (t8 = 0; t8 <= ny - 2; t8 += 128)
          for (t10 = t8; t10 <= (ny - 2 < t8 + 127 ? ny - 2 : t8 + 127); t10 += 1)
            hz[t6][t10] = hz[t6][t10] - 0.69999999999999996 * (ex[t6][t10 + 1] - ex[t6][t10] + ey[t6 + 1][t10] - ey[t6][t10]);
  }
}
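As a readability aid, the untiled form of the three updates above, recovered mechanically from the tiled loops (t6 maps to i, t10 to j; the 32x128 blocking drops out, and 0.69999999999999996 is the double value of 0.7). Same arithmetic as the generated kernel, shown without the OpenMP pragmas.

/* Untiled reference form (derived, not from the generated file). */
void kernel_fdtd_2d_untiled(int tmax, int nx, int ny, double ex[1000][1200], double ey[1000][1200], double hz[1000][1200], double _fict_[500]) {
  for (int t = 0; t < tmax; t++) {
    for (int j = 0; j < ny; j++)
      ey[0][j] = _fict_[t];
    for (int i = 1; i < nx; i++)
      for (int j = 0; j < ny; j++)
        ey[i][j] = ey[i][j] - 0.5 * (hz[i][j] - hz[i - 1][j]);
    for (int i = 0; i < nx; i++)
      for (int j = 1; j < ny; j++)
        ex[i][j] = ex[i][j] - 0.5 * (hz[i][j] - hz[i][j - 1]);
    for (int i = 0; i < nx - 1; i++)
      for (int j = 0; j < ny - 1; j++)
        hz[i][j] = hz[i][j] - 0.7 * (ex[i][j + 1] - ex[i][j] + ey[i + 1][j] - ey[i][j]);
  }
}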
GB_malloc_memory.c
//------------------------------------------------------------------------------
// GB_malloc_memory: wrapper for malloc
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// A wrapper for MALLOC.  Space is not initialized.

// Parameters are the same as the POSIX malloc, except that asking to allocate
// a block of zero size causes a block of size 1 to be allocated instead.  This
// allows the return pointer p to be checked for the out-of-memory condition,
// even when allocating an object of size zero.

// By default, MALLOC is defined in GB.h as malloc.  For a MATLAB mexFunction,
// it is mxMalloc.  It can also be defined at compile time with
// -DMALLOC=mymallocfunc.

#include "GB.h"

void *GB_malloc_memory      // pointer to allocated block of memory
(
    size_t nitems,          // number of items to allocate
    size_t size_of_item     // sizeof each item
)
{
    void *p ;
    size_t size ;
    int nmalloc ;

    // make sure at least one item is allocated
    nitems = IMAX (1, nitems) ;

    // make sure at least one byte is allocated
    size_of_item = IMAX (1, size_of_item) ;

    bool ok = GB_size_t_multiply (&size, nitems, size_of_item) ;
    if (!ok || nitems > GB_INDEX_MAX || size_of_item > GB_INDEX_MAX)
    {
        // overflow
        p = NULL ;
    }
    else
    {
        // check the malloc debug status.  This debug flag is set outside
        // of GraphBLAS and not modified, so it is safe to check it outside
        // a critical section.
        bool pretend_to_fail = false ;
        if (GB_Global.malloc_debug)
        {
            // brutal malloc debug; pretend to fail if the count <= 0
            #pragma omp critical (GB_memory)
            {
                pretend_to_fail = (GB_Global.malloc_debug_count-- <= 0) ;
            }
        }
        if (pretend_to_fail)
        {
            p = NULL ;
        }
        else
        {
            p = (void *) MALLOC (size) ;
        }
        if (p != NULL)
        {
            #pragma omp critical (GB_memory)
            {
                nmalloc = ++GB_Global.nmalloc ;
                GB_Global.inuse += nitems * size_of_item ;
                GB_Global.maxused = IMAX (GB_Global.maxused, GB_Global.inuse) ;
            }
            #ifdef PRINT_MALLOC
            printf ("malloc: %14p %3d %1d n "GBd" size "GBd"\n",
                p, nmalloc, GB_Global.malloc_debug,
                (int64_t) nitems, (int64_t) size_of_item) ;
            #endif
        }
    }
    return (p) ;
}
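The overflow guard above relies on GB_size_t_multiply, which is defined elsewhere in GraphBLAS; the stand-in below only illustrates the contract that call is assumed to satisfy (return false on overflow, otherwise store the product and return true).

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in, not the GraphBLAS implementation. */
static bool size_t_multiply_sketch (size_t *c, size_t a, size_t b)
{
    if (a == 0 || b == 0) { (*c) = 0 ; return (true) ; }
    if (a > SIZE_MAX / b) return (false) ;   // a*b would overflow size_t
    (*c) = a * b ;
    return (true) ;
}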
par_lr_interp.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ #include "_hypre_parcsr_ls.h" #include "aux_interp.h" #define MAX_C_CONNECTIONS 100 #define HAVE_COMMON_C 1 /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildStdInterp * Comment: The interpolatory weighting can be changed with the sep_weight * variable. This can enable not separating negative and positive * off diagonals in the weight formula. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildStdInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int sep_weight, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /* HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; //HYPRE_BigInt *found; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int 
jj_end_row_offd = 0; HYPRE_Int coarse_counter; HYPRE_Int *ihat = NULL; HYPRE_Int *ihat_offd = NULL; HYPRE_Int *ipnt = NULL; HYPRE_Int *ipnt_offd = NULL; HYPRE_Int strong_f_marker = -2; /* Interpolation weight variables */ HYPRE_Real *ahat = NULL; HYPRE_Real *ahat_offd = NULL; HYPRE_Real sum_pos, sum_pos_C, sum_neg, sum_neg_C, sum, sum_C; HYPRE_Real diagonal, distribute; HYPRE_Real alfa = 1.; HYPRE_Real beta = 1.; /* Loop variables */ // HYPRE_Int index; HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, j1, jj, kk, k1; HYPRE_Int cnt_c, cnt_f, cnt_c_offd, cnt_f_offd, indx; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; HYPRE_Real wall_1 = 0; HYPRE_Real wall_2 = 0; HYPRE_Real wall_3 = 0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag== 4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 0); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. 
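* This first pass only counts row sizes: each member of C-hat(i) (the
* C-points that strongly influence i, plus the C-points that strongly
* influence i's strong F-neighbors) is marked once via P_marker /
* P_marker_offd, so no column is counted twice within a row.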
*-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] >= 0) { if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] >= 0) { if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } /* Initialize ahat, which is a modification to a, used in the standard * interpolation routine. 
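* ihat maps a local column to its slot in ahat (-1 if unused) and ipnt is
* the inverse map, so both can be reset after each row by touching only the
* slots actually used. Slots [0, cnt_c) hold the C-points of the
* interpolation set; the slot at cnt_c belongs to i itself and later
* supplies the diagonal.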
*/ if (n_fine) { ahat = hypre_CTAlloc(HYPRE_Real, n_fine, HYPRE_MEMORY_HOST); ihat = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); ipnt = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { ahat_offd = hypre_CTAlloc(HYPRE_Real, full_off_procNodes, HYPRE_MEMORY_HOST); ihat_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); ipnt_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } for (i = 0; i < n_fine; i++) { P_marker[i] = -1; ahat[i] = 0; ihat[i] = -1; } for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1; ahat_offd[i] = 0; ihat_offd[i] = -1; } /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { if (debug_flag==4) wall_time = time_getWallclockSeconds(); strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
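* Note that P_diag_j temporarily stores the local column index i1; it is
* translated to a coarse-grid index via fine_to_coarse only once the
* weights for the row are complete.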
*--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = i1; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = k1; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd]=i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] >= 0) { if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = loc_col; P_diag_data[jj_counter] = zero; jj_counter++; } } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] >= 0) { if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_1 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); cnt_c = 0; cnt_f = jj_end_row-jj_begin_row; cnt_c_offd = 0; cnt_f_offd = jj_end_row_offd-jj_begin_row_offd; ihat[i] = cnt_f; ipnt[cnt_f] = i; ahat[cnt_f++] = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is direct neighbor */ i1 = A_diag_j[jj]; if (P_marker[i1] != strong_f_marker) { indx = ihat[i1]; if (indx > -1) ahat[indx] += A_diag_data[jj]; else if (P_marker[i1] >= jj_begin_row) { ihat[i1] = cnt_c; ipnt[cnt_c] = i1; ahat[cnt_c++] += A_diag_data[jj]; } else if (CF_marker[i1] != -3) { ihat[i1] = cnt_f; ipnt[cnt_f] = i1; ahat[cnt_f++] += A_diag_data[jj]; } } else { if (num_functions == 1 || dof_func[i] == dof_func[i1]) { distribute = A_diag_data[jj]/A_diag_data[A_diag_i[i1]]; for (kk = A_diag_i[i1]+1; kk < A_diag_i[i1+1]; kk++) { k1 = A_diag_j[kk]; indx = ihat[k1]; if (indx > -1) ahat[indx] -= A_diag_data[kk]*distribute; else if (P_marker[k1] >= jj_begin_row) { ihat[k1] = cnt_c; ipnt[cnt_c] = k1; ahat[cnt_c++] -= A_diag_data[kk]*distribute; } else { ihat[k1] = cnt_f; ipnt[cnt_f] = k1; ahat[cnt_f++] -= A_diag_data[kk]*distribute; } } if (num_procs > 1) { for (kk = A_offd_i[i1]; kk < A_offd_i[i1+1]; kk++) { k1 = A_offd_j[kk]; indx = ihat_offd[k1]; if (num_functions == 1 || dof_func[i1] == dof_func_offd[k1]) { if (indx > -1) ahat_offd[indx] -= A_offd_data[kk]*distribute; else if (P_marker_offd[k1] >= jj_begin_row_offd) { ihat_offd[k1] = 
cnt_c_offd; ipnt_offd[cnt_c_offd] = k1; ahat_offd[cnt_c_offd++] -= A_offd_data[kk]*distribute; } else { ihat_offd[k1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = k1; ahat_offd[cnt_f_offd++] -= A_offd_data[kk]*distribute; } } } } } } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] != strong_f_marker) { indx = ihat_offd[i1]; if (indx > -1) ahat_offd[indx] += A_offd_data[jj]; else if (P_marker_offd[i1] >= jj_begin_row_offd) { ihat_offd[i1] = cnt_c_offd; ipnt_offd[cnt_c_offd] = i1; ahat_offd[cnt_c_offd++] += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { ihat_offd[i1] = cnt_f_offd; ipnt_offd[cnt_f_offd] = i1; ahat_offd[cnt_f_offd++] += A_offd_data[jj]; } } else { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) { distribute = A_offd_data[jj]/A_ext_data[A_ext_i[i1]]; for (kk = A_ext_i[i1]+1; kk < A_ext_i[i1+1]; kk++) { big_k1 = A_ext_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /*diag*/ loc_col = (HYPRE_Int)(big_k1 - col_1); indx = ihat[loc_col]; if (indx > -1) ahat[indx] -= A_ext_data[kk]*distribute; else if (P_marker[loc_col] >= jj_begin_row) { ihat[loc_col] = cnt_c; ipnt[cnt_c] = loc_col; ahat[cnt_c++] -= A_ext_data[kk]*distribute; } else { ihat[loc_col] = cnt_f; ipnt[cnt_f] = loc_col; ahat[cnt_f++] -= A_ext_data[kk]*distribute; } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (num_functions == 1 || dof_func_offd[loc_col] == dof_func_offd[i1]) { indx = ihat_offd[loc_col]; if (indx > -1) ahat_offd[indx] -= A_ext_data[kk]*distribute; else if (P_marker_offd[loc_col] >= jj_begin_row_offd) { ihat_offd[loc_col] = cnt_c_offd; ipnt_offd[cnt_c_offd] = loc_col; ahat_offd[cnt_c_offd++] -= A_ext_data[kk]*distribute; } else { ihat_offd[loc_col] = cnt_f_offd; ipnt_offd[cnt_f_offd] = loc_col; ahat_offd[cnt_f_offd++] -= A_ext_data[kk]*distribute; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_2 += wall_time; fflush(NULL); } if (debug_flag==4) wall_time = time_getWallclockSeconds(); diagonal = ahat[cnt_c]; ahat[cnt_c] = 0; sum_pos = 0; sum_pos_C = 0; sum_neg = 0; sum_neg_C = 0; sum = 0; sum_C = 0; if (sep_weight == 1) { for (jj=0; jj < cnt_c; jj++) { if (ahat[jj] > 0) { sum_pos_C += ahat[jj]; } else { sum_neg_C += ahat[jj]; } } if (num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos_C += ahat_offd[jj]; } else { sum_neg_C += ahat_offd[jj]; } } } sum_pos = sum_pos_C; sum_neg = sum_neg_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { if (ahat[jj] > 0) { sum_pos += ahat[jj]; } else { sum_neg += ahat[jj]; } ahat[jj] = 0; } if (num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { if (ahat_offd[jj] > 0) { sum_pos += ahat_offd[jj]; } else { sum_neg += ahat_offd[jj]; } ahat_offd[jj] = 0; } } if (sum_neg_C*diagonal != 0) alfa = sum_neg/sum_neg_C/diagonal; if (sum_pos_C*diagonal != 0) beta = sum_pos/sum_pos_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
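* With sep_weight == 1 the negative and positive entries are scaled
* independently:
*   alfa = sum_neg / (sum_neg_C * diagonal),  w_j = -alfa * ahat[j]  (ahat[j] < 0)
*   beta = sum_pos / (sum_pos_C * diagonal),  w_j = -beta * ahat[j]  (ahat[j] > 0)
* where the *_C sums run over the C-points of the interpolation set only.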
*-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; if (ahat[j1] > 0) P_diag_data[jj] = -beta*ahat[j1]; else P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if (num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; if (ahat_offd[j1] > 0) P_offd_data[jj] = -beta*ahat_offd[j1]; else P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } else { for (jj=0; jj < cnt_c; jj++) { sum_C += ahat[jj]; } if (num_procs > 1) { for (jj=0; jj < cnt_c_offd; jj++) { sum_C += ahat_offd[jj]; } } sum = sum_C; for (jj=cnt_c+1; jj < cnt_f; jj++) { sum += ahat[jj]; ahat[jj] = 0; } if (num_procs > 1) { for (jj=cnt_c_offd; jj < cnt_f_offd; jj++) { sum += ahat_offd[jj]; ahat_offd[jj] = 0; } } if (sum_C*diagonal != 0) alfa = sum/sum_C/diagonal; /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. *-----------------------------------------------------------------*/ for (jj = jj_begin_row; jj < jj_end_row; jj++) { j1 = ihat[P_diag_j[jj]]; P_diag_data[jj] = -alfa*ahat[j1]; P_diag_j[jj] = fine_to_coarse[P_diag_j[jj]]; ahat[j1] = 0; } for (jj=0; jj < cnt_f; jj++) ihat[ipnt[jj]] = -1; if (num_procs > 1) { for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) { j1 = ihat_offd[P_offd_j[jj]]; P_offd_data[jj] = -alfa*ahat_offd[j1]; ahat_offd[j1] = 0; } for (jj=0; jj < cnt_f_offd; jj++) ihat_offd[ipnt_offd[jj]] = -1; } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; wall_3 += wall_time; fflush(NULL); } } } if (debug_flag==4) { hypre_printf("Proc = %d fill part 1 %f part 2 %f part 3 %f\n", my_id, wall_1, wall_2, wall_3); fflush(NULL); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. 
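* hypre_build_interp_colmap (declared in aux_interp.h) presumably also
* compresses P_offd_j from the full off-processor numbering down to the
* set of columns P actually uses.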
*/ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(ahat, HYPRE_MEMORY_HOST); hypre_TFree(ihat, HYPRE_MEMORY_HOST); hypre_TFree(ipnt, HYPRE_MEMORY_HOST); if (full_off_procNodes) { hypre_TFree(ahat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ihat_offd, HYPRE_MEMORY_HOST); hypre_TFree(ipnt_offd, HYPRE_MEMORY_HOST); } if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtPIInterp * Comment: *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPIInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime(); #endif /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag*/ 
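/* A_ext and Sop hold complete rows of A and S for the off-processor points
 * (including neighbors of neighbors), with global column numbers in their
 * big-J arrays. A column that is itself off-processor arrives encoded
 * negatively as -(loc_col + 1); e.g. off-diagonal slot 4 is stored as -5,
 * which is why loc_col is recovered as (-big_k1 - 1) below. */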
hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /* Threading variables */ HYPRE_Int my_thread_num, num_threads, start, stop; HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); HYPRE_Int * diag_offset; HYPRE_Int * fine_to_coarse_offset; HYPRE_Int * offd_offset; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } /* This function is smart enough to check P_marker and P_marker_offd only, * and set them if they are not NULL. 
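* (Both are passed as NULL here: each thread allocates and initializes its
* own P_marker / P_marker_offd inside the parallel region below.)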
The other vectors are set regardless.*/ hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); /*----------------------------------------------------------------------- * Initialize threading variables *-----------------------------------------------------------------------*/ max_num_threads[0] = hypre_NumThreads(); diag_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); fine_to_coarse_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); offd_offset = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST); for (i=0; i < max_num_threads[0]; i++) { diag_offset[i] = 0; fine_to_coarse_offset[i] = 0; offd_offset[i] = 0; } /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel private(i,my_thread_num,num_threads,start,stop,coarse_counter,jj_counter,jj_counter_offd, P_marker, P_marker_offd,jj,kk,i1,k1,loc_col,jj_begin_row,jj_begin_row_offd,jj_end_row,jj_end_row_offd,diagonal,sum,sgn,jj1,i2,distribute,strong_f_marker, big_k1) #endif { /* Parallelize by computing only over each thread's range of rows. * * The first large for loop computes ~locally~ for each thread P_diag_i, * P_offd_i and fine_to_coarse. Then, the arrays are stitched together * For eaxample the first phase would compute * P_diag_i = [0, 2, 4, 7, 2, 5, 6] * for two threads. P_diag_i[stop] points to the end of that * thread's data, but P_diag_i[start] points to the end of the * previous thread's row range. This is then stitched together at the * end to yield, * P_diag_i = [0, 2, 4, 7, 9, 14, 15]. * * The second large for loop computes interpolation weights and is * relatively straight-forward to thread. */ /* initialize thread-wise variables */ strong_f_marker = -2; coarse_counter = 0; jj_counter = start_indexing; jj_counter_offd = start_indexing; if (n_fine) { P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); for (i = 0; i < full_off_procNodes; i++) { P_marker_offd[i] = -1;} } /* this thread's row range */ my_thread_num = hypre_GetThreadNum(); num_threads = hypre_NumActiveThreads(); start = (n_fine/num_threads)*my_thread_num; if (my_thread_num == num_threads-1) { stop = n_fine; } else { stop = (n_fine/num_threads)*(my_thread_num+1); } /* loop over rows */ /* This loop counts the number of elements in P */ /* is done by counting the elmements in the index set C-hat */ for (i = start; i < stop; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { /* row in P corresponding to a coarse pt., will only require one element (1 on the diagonal). */ jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
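* Counting here is thread-local: jj_counter and jj_counter_offd restart
* from zero in each thread and are shifted by the other threads' totals in
* the stitching step after this loop.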
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is a F point, loop through it's strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. */ for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } /*----------------------------------------------------------------------- * End loop over fine grid. 
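* Each thread has published P_diag_i[stop], P_offd_i[stop] and its local
* counts; after the barrier, thread 0 accumulates the per-thread counts
* into running offsets, and every later thread shifts its slice of the
* row pointers and its fine_to_coarse entries by the totals of the
* threads before it.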
*-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif P_diag_i[stop] = jj_counter; P_offd_i[stop] = jj_counter_offd; fine_to_coarse_offset[my_thread_num] = coarse_counter; diag_offset[my_thread_num] = jj_counter; offd_offset[my_thread_num] = jj_counter_offd; /* Stitch P_diag_i, P_offd_i and fine_to_coarse together */ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { /* Calculate the offset for P_diag_i and P_offd_i for each thread */ for (i = 1; i < num_threads; i++) { diag_offset[i] = diag_offset[i-1] + diag_offset[i]; fine_to_coarse_offset[i] = fine_to_coarse_offset[i-1] + fine_to_coarse_offset[i]; offd_offset[i] = offd_offset[i-1] + offd_offset[i]; } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num > 0) { /* update row pointer array with offset, * making sure to update the row stop index */ for (i = start+1; i <= stop; i++) { P_diag_i[i] += diag_offset[my_thread_num-1]; P_offd_i[i] += offd_offset[my_thread_num-1]; } /* update fine_to_coarse by offsetting with the offset * from the preceding thread */ for (i = start; i < stop; i++) { if (fine_to_coarse[i] >= 0) { fine_to_coarse[i] += fine_to_coarse_offset[my_thread_num-1]; } } } #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif if (my_thread_num == 0) { if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED); } } /* Fine to coarse mapping */ if (num_procs > 1 && my_thread_num == 0) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp barrier #endif for (i = start; i < stop; i++) { jj_begin_row = P_diag_i[i]; jj_begin_row_offd = P_offd_i[i]; jj_counter = jj_begin_row; jj_counter_offd = jj_begin_row_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
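* The pattern search below retraces the counting pass, so jj_counter
* revisits exactly the slots recorded in P_diag_i / P_offd_i for this row
* before the weights are accumulated.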
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } else if (CF_marker[i1] != -3) { P_marker[i1] = strong_f_marker; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if ( CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { P_marker_offd[i1] = strong_f_marker; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
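* Only entries whose sign is opposite to a_(i1,i1) enter the sum (the sgn
* test); a_(i,i1) is then distributed proportionally over those entries,
* with the share that lands on i itself folded into the diagonal. If the
* sum is zero, all of a_(i,i1) is lumped into the diagonal instead.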
*/ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if (i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if (loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } /*----------------------------------------------------------------------- * End large for loop over nfine *-----------------------------------------------------------------------*/ if (n_fine) { hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } } /*----------------------------------------------------------------------- * End PAR_REGION *-----------------------------------------------------------------------*/ if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
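* Wrap the assembled CSR pieces in a ParCSRMatrix: the row partition is
* shared with A (hence OwnsRowStarts is cleared below) and the column
* partition is num_cpts_global.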
*-----------------------------------------------------------------------*/ P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] -= hypre_MPI_Wtime(); #endif P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(diag_offset, HYPRE_MEMORY_HOST); hypre_TFree(offd_offset, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse_offset, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) { hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); } hypre_MatvecCommPkgDestroy(extend_comm_pkg); } #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtPICCInterp * Comment: Only use FF when there is no common c point. 
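* That is, for an F-F pair (i,i1) the stencil of i is only extended
* through i1 when the two points share no strongly-influencing C-point;
* a shared C-point is taken to carry the F-F information already.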
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtPICCInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int **ext_p, **ext_p_offd;*/ /*HYPRE_Int ccounter_offd; HYPRE_Int *clist_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; 
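/* the last rank holds the final entry of the C-point partition;
 * broadcast the global count of C-points to everyone */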
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off processor information (specifically for neighbors of * neighbors */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } /*clist = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS); for (i = 0; i < MAX_C_CONNECTIONS; i++) clist[i] = 0; if (num_procs > 1) { clist_offd = hypre_CTAlloc(HYPRE_Int, MAX_C_CONNECTIONS, HYPRE_MEMORY_HOST); for (i = 0; i < MAX_C_CONNECTIONS; i++) clist_offd[i] = 0; }*/ hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that stronly influence F-points * that strongly influence i. 
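* Direct C-neighbors of i are temporarily re-marked with CF_marker == 2,
* so the common-C test for each strong F-neighbor reduces to scanning its
* row for a 2; the marks are reset to 1 once row i is finished.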
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /* Initialize ccounter for each f point */ /*ccounter = 0; ccounter_offd = 0;*/ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { /*clist[ccounter++] = i1;*/ P_marker[i1] = jj_counter; jj_counter++; } } } /*qsort0(clist,0,ccounter-1);*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { /*clist_offd[ccounter_offd++] = i1;*/ tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if common c point */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { /* i1 is a F point, loop through it's strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if (hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; /*break;*/ } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; /*break;*/ } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither. 
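* First scan i1's row of Sop for a point marked 2 (a common C-point);
* only when none is found does the second scan below extend the
* interpolation set with i1's strong columns.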
*/ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { /*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = (HYPRE_BigInt)(-big_k1 - 1); if (CF_marker_offd[loc_col] == 2) { /*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; /*break;*/ } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; /*break;*/ } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_SHARED); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_SHARED); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_SHARED); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_SHARED); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
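* strong_f_marker is decremented for every F-row processed, so marks left
* in P_marker by earlier rows can never be mistaken for this row's strong
* F-neighbors.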
*--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; /*clist[ccounter++] = i1;*/ } } } /*qsort0(clist,0,ccounter-1);*/ if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*clist_offd[ccounter_offd++] = i1;*/ } } } /*qsort0(clist_offd,0,ccounter_offd-1);*/ } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { /*if (hypre_BinarySearch(clist,k1,ccounter) >= 0) {*/ common_c = 1; break; /*kk = S_diag_i[i1+1]; }*/ } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ /*if (hypre_BinarySearch(clist_offd,k1,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = S_offd_i[i1+1]; }*/ } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { /*if (hypre_BinarySearch(clist,loc_col,ccounter) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (CF_marker_offd[loc_col] == 2) { /*if (hypre_BinarySearch(clist_offd,loc_col,ccounter_offd) >= 0) {*/ common_c = 1; break; /*kk = Sop_i[i1+1]; }*/ } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { 
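/* no common C-point: extend the stencil of i with the entries of
 * i1's strong row */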
big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; /*break;*/ } } else { loc_col = (-big_k1 - 1); if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; /*break;*/ } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; sgn = 1; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if ((P_marker[i2] >= jj_begin_row || i2 == i) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; if (i2 == i && (sgn*A_diag_data[jj1]) < 0) diagonal += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; sgn = 1; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row || loc_col == i) sum += A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; 
jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; if (loc_col == i) diagonal += distribute*A_ext_data[jj1]; } else { loc_col = (HYPRE_Int)(-big_k1 - 1); if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); /*hypre_TFree(clist);*/ if (num_procs > 1) { /*hypre_TFree(clist_offd);*/ hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFFInterp * Comment: Only use FF when there is no common c point. 
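 *          For each F-point i, interpolation uses the C-points that
 *          strongly influence i; a strong F-F connection (i,i1) brings in
 *          i1's C neighbors only when i and i1 share no strongly connected
 *          C-point (tracked with the temporary CF_marker value 2).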
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFFInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter; HYPRE_Int *clist, ccounter;*/ /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, 
num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off-processor information (specifically for neighbors of * neighbors) */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that strongly influence F-points * that strongly influence i.
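 * A temporary CF_marker value of 2 marks C-points already counted for
 * the current row i; it is reset to 1 before the next row is processed.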
*--------------------------------------------------------------------*/ else { /* First mark the distance-one C neighbors of i */ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if there is a common C point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is an F point; loop through its strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither.
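 * Off-process columns appear in Sop and A_ext as negative values encoded
 * as -(local_col+1), so local_col = -big_k1 - 1 recovers the local index.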
*/ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
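 * (A P_marker entry below jj_begin_row means the column is not yet
 * present in row i.)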
*--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
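 * (In this pass the entry was already added above; this loop only resets
 * the temporary marker value 2 back to 1 now that row i's column set is
 * complete.)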
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, 
hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map, col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildFF1Interp * Comment: Only use FF when there is no common c point. 
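 *          FF1 variant: when the stencil must be extended, at most one
 *          distance-two C-point is added per F neighbor (note the break
 *          after the first candidate found below).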
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildFF1Interp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /*HYPRE_Int ccounter_offd;*/ HYPRE_Int common_c; /* Full row information for columns of A that are off diag*/ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; HYPRE_Int sgn = 1; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /*HYPRE_Int ccounter;*/ HYPRE_Int found_c = 0; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, 
comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off-processor information (specifically for neighbors of * neighbors) */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector. *-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that strongly influence F-points * that strongly influence i.
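 * The first pass only counts row sizes (jj_counter, jj_counter_offd);
 * the entries of P are filled in the second pass.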
*--------------------------------------------------------------------*/ else { /* First mark the distance-one C neighbors of i */ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] > 0) { /* i1 is a C point */ CF_marker[i1] = 2; if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] > 0) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search diag to find f neighbors and determine if there is a common C point */ i1 = S_diag_j[jj]; if (CF_marker[i1] < 0) { /* i1 is an F point; loop through its strong neighbors */ common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a C point; check if it is common */ common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ found_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] > 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; found_c = 1; break; } } } if (num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] > 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; break; } } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] < 0) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither.
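 * (Off-process columns use the same -(local_col+1) encoding as in the
 * FF routine above.)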
*/ common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* search through diag to find all c neighbors */ i1 = S_diag_j[jj]; if (CF_marker[i1] == 2) CF_marker[i1] = 1; } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { /* search through offd to find all c neighbors */ if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if (CF_marker_offd[i1] == 2) { /* i1 is a C point direct neighbor */ CF_marker_offd[i1] = 1; } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*ccounter = start_indexing; ccounter_offd = start_indexing;*/ /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ jj_begin_row_offd = 0; for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; if (num_procs > 1) jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { /*ccounter = 0; ccounter_offd = 0;*/ strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
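 * (Second pass: P_marker now stores positions into P_diag_j and
 * P_diag_data for the current row.)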
*--------------------------------------------------------------*/ if (CF_marker[i1] > 0) { CF_marker[i1] = 2; if (P_marker[i1] < jj_begin_row) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] > 0) { CF_marker_offd[i1] = 2; if (P_marker_offd[i1] < jj_begin_row_offd) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search through F points */ i1 = S_diag_j[jj]; if (CF_marker[i1] == -1) { P_marker[i1] = strong_f_marker; common_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] == 2) { common_c = 1; break; } } if (num_procs > 1 && common_c == 0) { /* no common c point yet, check offd */ for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] == 2) { /* k1 is a c point check if it is common */ common_c = 1; break; } } } if (!common_c) { /* No common c point, extend the interp set */ found_c = 0; for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < jj_begin_row) { P_marker[k1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[k1]; P_diag_data[jj_counter] = zero; jj_counter++; found_c = 1; break; } } } if (num_procs > 1 && !found_c) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < jj_begin_row_offd) { P_marker_offd[k1] = jj_counter_offd; P_offd_j[jj_counter_offd] = k1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] == -1) { /* F points that are off proc */ P_marker_offd[i1] = strong_f_marker; common_c = 0; for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { /* Check if common c */ big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (CF_marker[loc_col] == 2) { common_c = 1; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (CF_marker_offd[loc_col] == 2) { common_c = 1; break; } } } if (!common_c) { for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; /* Find local col number */ if (big_k1 >= col_1 && big_k1 < col_n) { loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < jj_begin_row) { P_marker[loc_col] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[loc_col]; P_diag_data[jj_counter] = zero; jj_counter++; break; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < jj_begin_row_offd) { P_marker_offd[loc_col] = jj_counter_offd; P_offd_j[jj_counter_offd]=loc_col; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; break; } } } } } } } for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { /* Search C points only */ i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
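 * (As above, this loop only resets the temporary marker value 2 back
 * to 1; the entries were already added earlier in this pass.)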
*--------------------------------------------------------------*/ if (CF_marker[i1] == 2) { CF_marker[i1] = 1; } } if ( num_procs > 1) { for (jj=S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { if (col_offd_S_to_A) i1 = col_offd_S_to_A[S_offd_j[jj]]; else i1 = S_offd_j[jj]; if ( CF_marker_offd[i1] == 2) { CF_marker_offd[i1] = 1; } } } jj_end_row = jj_counter; jj_end_row_offd = jj_counter_offd; diagonal = A_diag_data[A_diag_i[i]]; for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++) { /* i1 is a c-point and strongly influences i, accumulate * a_(i,i1) into interpolation weight */ i1 = A_diag_j[jj]; if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += A_diag_data[jj]; } else if (P_marker[i1] == strong_f_marker) { sum = zero; if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1; /* Loop over row of A for point i1 and calculate the sum * of the connections to c-points that strongly incluence i. */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else diagonal += A_diag_data[jj]; } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = - (HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else diagonal += A_offd_data[jj]; } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } P = hypre_ParCSRMatrixCreate(comm, 
hypre_ParCSRMatrixGlobalNumRows(A), total_global_cpts, hypre_ParCSRMatrixColStarts(A), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0 || max_elmts > 0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } /* This builds col_map; col_map should be monotone increasing and contain * global numbers. */ if (P_offd_size) { hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd, fine_to_coarse_offd); } hypre_MatvecCommPkgCreate(P); for (i=0; i < n_fine; i++) if (CF_marker[i] == -3) CF_marker[i] = -1; *P_ptr = P; /* Deallocate memory */ hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); if (num_procs > 1) { hypre_CSRMatrixDestroy(Sop); hypre_CSRMatrixDestroy(A_ext); hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST); if (num_functions > 1) hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_MatvecCommPkgDestroy(extend_comm_pkg); } return hypre_error_flag; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildExtInterp * Comment: Builds extended interpolation; each F-point interpolates from * its strong C neighbors and from the C neighbors of all of its * strong F neighbors. *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildExtInterp(hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int max_elmts, HYPRE_Int *col_offd_S_to_A, hypre_ParCSRMatrix **P_ptr) { /* Communication Variables */ MPI_Comm comm = hypre_ParCSRMatrixComm(A); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A); HYPRE_Int my_id, num_procs; /* Variables to store input variables */ hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); /*HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(A);*/ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(A); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_BigInt total_global_cpts, my_first_cpt; /* Variables to store strong connection matrix info */ hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
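/* Note: only the pattern of S (row pointers and column indices) is used below; the strength values themselves are not needed to build the stencil */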
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); /* Interpolation matrix P */ hypre_ParCSRMatrix *P; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data = NULL; HYPRE_Int *P_diag_i, *P_diag_j = NULL; HYPRE_Real *P_offd_data = NULL; HYPRE_Int *P_offd_i, *P_offd_j = NULL; /*HYPRE_Int *col_map_offd_P = NULL;*/ HYPRE_Int P_diag_size; HYPRE_Int P_offd_size; HYPRE_Int *P_marker = NULL; HYPRE_Int *P_marker_offd = NULL; HYPRE_Int *CF_marker_offd = NULL; HYPRE_Int *tmp_CF_marker_offd = NULL; HYPRE_Int *dof_func_offd = NULL; /* Full row information for columns of A that are off diag */ hypre_CSRMatrix *A_ext; HYPRE_Real *A_ext_data; HYPRE_Int *A_ext_i; HYPRE_BigInt *A_ext_j; HYPRE_Int *fine_to_coarse = NULL; HYPRE_BigInt *fine_to_coarse_offd = NULL; HYPRE_Int loc_col; HYPRE_Int full_off_procNodes; hypre_CSRMatrix *Sop; HYPRE_Int *Sop_i; HYPRE_BigInt *Sop_j; HYPRE_Int sgn = 1; /* Variables to keep count of interpolatory points */ HYPRE_Int jj_counter, jj_counter_offd; HYPRE_Int jj_begin_row, jj_end_row; HYPRE_Int jj_begin_row_offd = 0; HYPRE_Int jj_end_row_offd = 0; HYPRE_Int coarse_counter; /* Interpolation weight variables */ HYPRE_Real sum, diagonal, distribute; HYPRE_Int strong_f_marker = -2; /* Loop variables */ /*HYPRE_Int index;*/ HYPRE_Int start_indexing = 0; HYPRE_Int i, i1, i2, jj, kk, k1, jj1; HYPRE_BigInt big_k1; /* Definitions */ HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Real wall_time; hypre_ParCSRCommPkg *extend_comm_pkg = NULL; if (debug_flag==4) wall_time = time_getWallclockSeconds(); /* BEGIN */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); #ifdef HYPRE_NO_GLOBAL_PARTITION my_first_cpt = num_cpts_global[0]; if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm); #else my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Set up off-processor information (specifically for neighbors of * neighbors) */ full_off_procNodes = 0; if (num_procs > 1) { hypre_exchange_interp_data( &CF_marker_offd, &dof_func_offd, &A_ext, &full_off_procNodes, &Sop, &extend_comm_pkg, A, CF_marker, S, num_functions, dof_func, 1); { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_EXTENDED_I_INTERP] += hypre_MPI_Wtime(); #endif } A_ext_i = hypre_CSRMatrixI(A_ext); A_ext_j = hypre_CSRMatrixBigJ(A_ext); A_ext_data = hypre_CSRMatrixData(A_ext); Sop_i = hypre_CSRMatrixI(Sop); Sop_j = hypre_CSRMatrixBigJ(Sop); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Initialize counters and allocate mapping vector.
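 * (P_diag_i and P_offd_i have n_fine+1 entries so that the final slot
 * can receive the total nonzero counts after the first pass.)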
*-----------------------------------------------------------------------*/ P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); if (n_fine) { fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); } if (full_off_procNodes) { P_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, full_off_procNodes, HYPRE_MEMORY_HOST); tmp_CF_marker_offd = hypre_CTAlloc(HYPRE_Int, full_off_procNodes, HYPRE_MEMORY_HOST); } hypre_initialize_vecs(n_fine, full_off_procNodes, fine_to_coarse, fine_to_coarse_offd, P_marker, P_marker_offd, tmp_CF_marker_offd); jj_counter = start_indexing; jj_counter_offd = start_indexing; coarse_counter = 0; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { P_diag_i[i] = jj_counter; if (num_procs > 1) P_offd_i[i] = jj_counter_offd; if (CF_marker[i] >= 0) { jj_counter++; fine_to_coarse[i] = coarse_counter; coarse_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i, or C-points that strongly influence F-points * that strongly influence i. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { /* i1 is a C point */ if (P_marker[i1] < P_diag_i[i]) { P_marker[i1] = jj_counter; jj_counter++; } } else if (CF_marker[i1] != -3) { /* i1 is an F point; loop through its strong neighbors */ for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++) { k1 = S_diag_j[kk]; if (CF_marker[k1] >= 0) { if (P_marker[k1] < P_diag_i[i]) { P_marker[k1] = jj_counter; jj_counter++; } } } if (num_procs > 1) { for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++) { if (col_offd_S_to_A) k1 = col_offd_S_to_A[S_offd_j[kk]]; else k1 = S_offd_j[kk]; if (CF_marker_offd[k1] >= 0) { if (P_marker_offd[k1] < P_offd_i[i]) { tmp_CF_marker_offd[k1] = 1; P_marker_offd[k1] = jj_counter_offd; jj_counter_offd++; } } } } } } /* Look at off diag strong connections of i */ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (col_offd_S_to_A) i1 = col_offd_S_to_A[i1]; if (CF_marker_offd[i1] >= 0) { if (P_marker_offd[i1] < P_offd_i[i]) { tmp_CF_marker_offd[i1] = 1; P_marker_offd[i1] = jj_counter_offd; jj_counter_offd++; } } else if (CF_marker_offd[i1] != -3) { /* F point; look at neighbors of i1. Sop contains global col * numbers and entries that could be in S_diag or S_offd or * neither.
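 * Unlike the FF variants above, the extended scheme adds distance-two
 * C-points through every strong F neighbor, without first testing for a
 * common C-point.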
*/ for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++) { big_k1 = Sop_j[kk]; if (big_k1 >= col_1 && big_k1 < col_n) { /* In S_diag */ loc_col = (HYPRE_Int)(big_k1-col_1); if (P_marker[loc_col] < P_diag_i[i]) { P_marker[loc_col] = jj_counter; jj_counter++; } } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] < P_offd_i[i]) { P_marker_offd[loc_col] = jj_counter_offd; tmp_CF_marker_offd[loc_col] = 1; jj_counter_offd++; } } } } } } } } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d determine structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ if (debug_flag== 4) wall_time = time_getWallclockSeconds(); P_diag_size = jj_counter; P_offd_size = jj_counter_offd; if (P_diag_size) { P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); } if (P_offd_size) { P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); } P_diag_i[n_fine] = jj_counter; P_offd_i[n_fine] = jj_counter_offd; jj_counter = start_indexing; jj_counter_offd = start_indexing; /* Fine to coarse mapping */ if (num_procs > 1) { hypre_big_insert_new_nodes(comm_pkg, extend_comm_pkg, fine_to_coarse, full_off_procNodes, my_first_cpt, fine_to_coarse_offd); } for (i = 0; i < n_fine; i++) P_marker[i] = -1; for (i = 0; i < full_off_procNodes; i++) P_marker_offd[i] = -1; /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ for (i = 0; i < n_fine; i++) { jj_begin_row = jj_counter; jj_begin_row_offd = jj_counter_offd; /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. *--------------------------------------------------------------------*/ else if (CF_marker[i] != -3) { strong_f_marker--; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. 
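 * (Weights are set to zero here and accumulated from entries of A in
 * the second half of this pass.)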
          *--------------------------------------------------------------*/
         if (CF_marker[i1] >= 0)
         {
            if (P_marker[i1] < jj_begin_row)
            {
               P_marker[i1] = jj_counter;
               P_diag_j[jj_counter] = fine_to_coarse[i1];
               P_diag_data[jj_counter] = zero;
               jj_counter++;
            }
         }
         else if (CF_marker[i1] != -3)
         {
            P_marker[i1] = strong_f_marker;
            for (kk = S_diag_i[i1]; kk < S_diag_i[i1+1]; kk++)
            {
               k1 = S_diag_j[kk];
               if (CF_marker[k1] >= 0)
               {
                  if (P_marker[k1] < jj_begin_row)
                  {
                     P_marker[k1] = jj_counter;
                     P_diag_j[jj_counter] = fine_to_coarse[k1];
                     P_diag_data[jj_counter] = zero;
                     jj_counter++;
                  }
               }
            }
            if (num_procs > 1)
            {
               for (kk = S_offd_i[i1]; kk < S_offd_i[i1+1]; kk++)
               {
                  if (col_offd_S_to_A)
                     k1 = col_offd_S_to_A[S_offd_j[kk]];
                  else
                     k1 = S_offd_j[kk];
                  if (CF_marker_offd[k1] >= 0)
                  {
                     if (P_marker_offd[k1] < jj_begin_row_offd)
                     {
                        P_marker_offd[k1] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = k1;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }
         }
      }

      if (num_procs > 1)
      {
         for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
         {
            i1 = S_offd_j[jj];
            if (col_offd_S_to_A)
               i1 = col_offd_S_to_A[i1];
            if (CF_marker_offd[i1] >= 0)
            {
               if (P_marker_offd[i1] < jj_begin_row_offd)
               {
                  P_marker_offd[i1] = jj_counter_offd;
                  P_offd_j[jj_counter_offd] = i1;
                  P_offd_data[jj_counter_offd] = zero;
                  jj_counter_offd++;
               }
            }
            else if (CF_marker_offd[i1] != -3)
            {
               P_marker_offd[i1] = strong_f_marker;
               for (kk = Sop_i[i1]; kk < Sop_i[i1+1]; kk++)
               {
                  big_k1 = Sop_j[kk];
                  /* Find local col number */
                  if (big_k1 >= col_1 && big_k1 < col_n)
                  {
                     loc_col = (HYPRE_Int)(big_k1-col_1);
                     if (P_marker[loc_col] < jj_begin_row)
                     {
                        P_marker[loc_col] = jj_counter;
                        P_diag_j[jj_counter] = fine_to_coarse[loc_col];
                        P_diag_data[jj_counter] = zero;
                        jj_counter++;
                     }
                  }
                  else
                  {
                     loc_col = -(HYPRE_Int)big_k1 - 1;
                     if (P_marker_offd[loc_col] < jj_begin_row_offd)
                     {
                        P_marker_offd[loc_col] = jj_counter_offd;
                        P_offd_j[jj_counter_offd] = loc_col;
                        P_offd_data[jj_counter_offd] = zero;
                        jj_counter_offd++;
                     }
                  }
               }
            }
         }
      }

      jj_end_row = jj_counter;
      jj_end_row_offd = jj_counter_offd;

      diagonal = A_diag_data[A_diag_i[i]];

      for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
      {
         /* i1 is a C-point and strongly influences i; accumulate
          * a_(i,i1) into the interpolation weight */
         i1 = A_diag_j[jj];
         if (P_marker[i1] >= jj_begin_row)
         {
            P_diag_data[P_marker[i1]] += A_diag_data[jj];
         }
         else if (P_marker[i1] == strong_f_marker)
         {
            sum = zero;
            sgn = 1;
            if (A_diag_data[A_diag_i[i1]] < 0) sgn = -1;
            /* Loop over row of A for point i1 and calculate the sum
             * of the connections to c-points that strongly influence i.
*/ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if ((P_marker[i2] >= jj_begin_row ) && (sgn*A_diag_data[jj1]) < 0) sum += A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1< A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) sum += A_offd_data[jj1]; } } if (sum != 0) { distribute = A_diag_data[jj]/sum; /* Loop over row of A for point i1 and do the distribution */ for (jj1 = A_diag_i[i1]+1; jj1 < A_diag_i[i1+1]; jj1++) { i2 = A_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row && (sgn*A_diag_data[jj1]) < 0) P_diag_data[P_marker[i2]] += distribute*A_diag_data[jj1]; } if (num_procs > 1) { for (jj1 = A_offd_i[i1]; jj1 < A_offd_i[i1+1]; jj1++) { i2 = A_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd && (sgn*A_offd_data[jj1]) < 0) P_offd_data[P_marker_offd[i2]] += distribute*A_offd_data[jj1]; } } } else { diagonal += A_diag_data[jj]; } } /* neighbor i1 weakly influences i, accumulate a_(i,i1) into * diagonal */ else if (CF_marker[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func[i1]) diagonal += A_diag_data[jj]; } } if (num_procs > 1) { for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++) { i1 = A_offd_j[jj]; if (P_marker_offd[i1] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i1]] += A_offd_data[jj]; else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row ) sum += A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) sum += A_ext_data[jj1]; } } if (sum != 0) { distribute = A_offd_data[jj] / sum; for (jj1 = A_ext_i[i1]; jj1 < A_ext_i[i1+1]; jj1++) { big_k1 = A_ext_j[jj1]; if (big_k1 >= col_1 && big_k1 < col_n) { /* diag */ loc_col = (HYPRE_Int)(big_k1 - col_1); if (P_marker[loc_col] >= jj_begin_row) P_diag_data[P_marker[loc_col]] += distribute* A_ext_data[jj1]; } else { loc_col = -(HYPRE_Int)big_k1 - 1; if (P_marker_offd[loc_col] >= jj_begin_row_offd) P_offd_data[P_marker_offd[loc_col]] += distribute* A_ext_data[jj1]; } } } else { diagonal += A_offd_data[jj]; } } else if (CF_marker_offd[i1] != -3) { if (num_functions == 1 || dof_func[i] == dof_func_offd[i1]) diagonal += A_offd_data[jj]; } } } if (diagonal) { for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= -diagonal; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= -diagonal; } } strong_f_marker--; } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d fill structure %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Allocate arrays. 
 *-----------------------------------------------------------------------*/
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(A),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(A),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   hypre_CSRMatrixMemoryLocation(P_diag) = HYPRE_MEMORY_HOST;
   hypre_CSRMatrixMemoryLocation(P_offd) = HYPRE_MEMORY_HOST;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0 || max_elmts > 0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, max_elmts);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* This builds col_map; col_map must be monotonically increasing and
    * contain global numbers. */
   if (P_offd_size)
   {
      hypre_build_interp_colmap(P, full_off_procNodes, tmp_CF_marker_offd,
                                fine_to_coarse_offd);
   }

   hypre_MatvecCommPkgCreate(P);

   for (i = 0; i < n_fine; i++)
      if (CF_marker[i] == -3) CF_marker[i] = -1;

   *P_ptr = P;

   /* Deallocate memory */
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(P_marker, HYPRE_MEMORY_HOST);

   if (num_procs > 1)
   {
      hypre_CSRMatrixDestroy(Sop);
      hypre_CSRMatrixDestroy(A_ext);
      hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_CF_marker_offd, HYPRE_MEMORY_HOST);
      if (num_functions > 1)
      {
         hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
      }
      hypre_MatvecCommPkgDestroy(extend_comm_pkg);
   }

   return hypre_error_flag;
}
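/*-----------------------------------------------------------------------
 * Illustrative sketch only (not hypre code and not hypre's API): for a
 * single F-row with no strong F-neighbors, the accumulate/distribute
 * logic above reduces to the classical interpolation weight
 *    w_ij = -a_ij / (a_ii + sum of weak a_ik),
 * i.e. weak couplings are lumped into the diagonal before normalizing.
 * All names below are hypothetical, chosen for the example.
 *-----------------------------------------------------------------------*/
#include <stdio.h>

/* vals[k]: off-diagonal entries of one row; is_C[k]/is_strong[k]: CF split
 * and strength flags per neighbor; diag: a_ii. Writes one weight per
 * strong C-neighbor into w[k]. */
static void classical_weights(int n, const double *vals, const int *is_C,
                              const int *is_strong, double diag, double *w)
{
   double d = diag;
   int k;
   for (k = 0; k < n; k++)
   {
      if (is_strong[k] && is_C[k])
         w[k] = vals[k];   /* accumulate a_(i,i1) into the weight slot */
      else if (!is_strong[k])
         d += vals[k];     /* weak connection: lump into the diagonal */
      else
         w[k] = 0.0;       /* strong F-neighbor: the real code instead
                              distributes a_ik over the C-points that
                              strongly influence k */
   }
   for (k = 0; k < n; k++)
      if (is_strong[k] && is_C[k])
         w[k] /= -d;       /* mirrors P_diag_data[jj] /= -diagonal above */
}

int main(void)
{
   /* toy row: a_ii = 4, two strong C-neighbors (-1), one weak entry (-0.1) */
   const double vals[3] = { -1.0, -1.0, -0.1 };
   const int is_C[3] = { 1, 1, 0 }, is_strong[3] = { 1, 1, 0 };
   double w[3];
   classical_weights(3, vals, is_C, is_strong, 4.0, w);
   printf("w = %g %g\n", w[0], w[1]); /* both 1/3.9, roughly 0.2564 */
   return 0;
}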
construction.h
/* An Experimental Study on Hub Labeling based Shortest Path Algorithms [Experiments and Analyses] Authors: Ye Li, Leong Hou U, Man Lung Yiu, Ngai Meng Kou Contact: yb47438@umac.mo Affiliation: University of Macau The MIT License (MIT) Copyright (c) 2016 University of Macau Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #pragma once #ifndef CONSTRUCTION_H #define CONSTRUCTION_H #include <queue> #include <set> #include "graph.h" #include "paras.h" #include "labels.h" #include "ordering.h" #include "heap.h" #include "graph_search.h" #include <omp.h> #include <unordered_map> #define numOfVertices SP_Constants::numOfVertices #define numOfEdges SP_Constants::numOfEdges #define INF_WEIGHT SP_Constants::INF_WEIGHT class construction { public: Label labels; DLabel dlabels; PLabel plabels; DPLabel dplabels; CLabel clabels; Ordering orders; }; class PL : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; PL(Graph &graph, Ordering &orders) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID 
eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid){ NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } PL(Graph &graph, Ordering &orders, bool D_FLAGS) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<index_t>& bindex_ = dlabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); // Backward labels. vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. 
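					// If existing hubs already certify dist(r, v) <= d (some hub w with
					// dist(r, w) + dist(w, v) <= d), the tentative label (r, d) is redundant
					// and the whole BFS subtree below v is cut off.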
for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid){ NodeID w = edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // Backward search. // Initialize backward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); } } void TD_BP_UNDIRECTED(Graph& graph, Ordering &orderes, int kNumBitParallelRoots, bool directed = false, bool bp = true) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; 
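		// NB: this variant ignores kNumBitParallelRoots/directed/bp and builds plain
		// undirected labels like the basic constructor above; note also the parameter
		// is spelled "orderes" while the body reads the inherited member "orders".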
vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } void TP_path(Graph &graph, Ordering &orders) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = plabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<NodeID> parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<NodeID> > > tmp_idx_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; parents[r] = inv[r]; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > 
&tmp_idx_parent_v = tmp_idx_parents[v]; index_t_path &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_parent_v.first.back() = r; tmp_idx_parent_v.second.back() = parents[v]; tmp_idx_parent_v.first.push_back(numOfVertices); tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parents[v].second[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); tmp_idx_parents[v].first.clear(); tmp_idx_parents[v].second.clear(); tmp_idx_parents[v].first.shrink_to_fit(); tmp_idx_parents[v].second.shrink_to_fit(); } } void TP_path_d(Graph &graph, Ordering &orders) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = dplabels.index_; vector<index_t_path>& bindex_ = dplabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<NodeID> parents(numOfVertices, numOfVertices); vector<NodeID> r_parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); // Backward labels. vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); // Backward labels. vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. 
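		// Each root r is processed twice: the forward BFS appends (r, d) to the
		// backward label sets r_tmp_idx of the vertices it reaches (answering r->v
		// queries), and the backward BFS on the reversed graph extends the forward
		// sets tmp_idx (answering v->r queries).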
for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; parents[r] = inv[r]; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &r_tmp_idx_parent_v = r_tmp_idx_parent[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); r_tmp_idx_parent_v.first.back() = r; r_tmp_idx_parent_v.second.back() = parents[v]; r_tmp_idx_parent_v.first.push_back(numOfVertices); r_tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; parents[r] = inv[r]; // Backward search. // Initialize backward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). 
for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { parents[w] = inv[v]; que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) { vis[que[i]] = false; parents[que[i]] = numOfVertices; } for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parent[v].second[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx_parent[v].first.clear(); tmp_idx_parent[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); tmp_idx_parent[v].first.shrink_to_fit(); tmp_idx_parent[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); bindex_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_p[i] = r_tmp_idx_parent[v].second[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx_parent[v].first.clear(); r_tmp_idx_parent[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); r_tmp_idx_parent[v].first.shrink_to_fit(); r_tmp_idx_parent[v].second.shrink_to_fit(); } } PL(Graph &graph, Ordering &orders, bool path_flags, bool directed_flags) { if (path_flags == true) { if (directed_flags == true) { TP_path_d(graph, orders); } else { TP_path(graph, orders); } } } /* PL(Graph &graph, Ordering &orders, bool path_flags, bool directed_flags, bool bp_flags, int kNumBitParallelRoots) { if (path_flags == true) { if (directed_flags == true) { TP_path_d(graph, orders); } else { TP_path(graph, orders); } } else { if (directed_flags == false) { TP_BP(graph, orders, kNumBitParallelRoots); } } } */ }; template<int kNumBitParallelRoots = 50> class BPL { //typedef BPLabel<kNumBitParallelRoots>::index_t_bp index_t_bp; public: BPLabel<kNumBitParallelRoots> bplabels; DBPLabel<kNumBitParallelRoots> dbplabels; BPL(Graph &graph, Ordering &orders) { //bplabels = BPLabel(kNumBitParallelRoots); //kNumBitParallelRoots = 64; //bplabels.setParas(kNumBitParallelRoots); // iteration_generated.resize(numOfVertices); // pruning_power.resize(numOfVertices); index_t_bp<kNumBitParallelRoots>*& index_ = bplabels.index_bp; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, 
make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); { vector<EdgeWeight> tmp_d(numOfVertices); vector<std::pair<uint64_t, uint64_t> > tmp_s(numOfVertices); vector<NodeID> que(numOfVertices); vector<std::pair<NodeID, NodeID> > sibling_es(numOfEdges); vector<std::pair<NodeID, NodeID> > child_es(numOfEdges); cout << "Building BP labels" << endl; index_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>)); //for (NodeID v = 0; v < numOfVertices; ++v) { // index_t_bp &idx = index_[v]; // idx.bpspt_d = (EdgeWeight*)memalign(64, kNumBitParallelRoots * sizeof(EdgeWeight)); // idx.bpspt_s = (uint64_t*)memalign(64, kNumBitParallelRoots * 2 * sizeof(uint64_t)); // /*for (int i = 0; i < kNumBitParallelRoots; ++i) { // idx.bpspt_s[i] = (uint64_t*)memalign(64, 2 * sizeof(uint64_t)); // }*/ //} int r = 0; for (int i_bpspt = 0; i_bpspt < kNumBitParallelRoots; ++i_bpspt) { while (r < numOfVertices && usd[r]) ++r; if (r == numOfVertices) { for (NodeID v = 0; v < numOfVertices; ++v) index_[v].bpspt_d[i_bpspt] = INF_WEIGHT; continue; } usd[r] = true; fill(tmp_d.begin(), tmp_d.end(), INF_WEIGHT); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); int que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; vector<int> vs; vector<NodeID> adj_r(graph.vertices[r + 1] - graph.vertices[r]); for (EdgeID eid = graph.vertices[r]; eid < graph.vertices[r + 1]; eid++) { adj_r[eid - graph.vertices[r]] = graph.edges[eid]; } sort(adj_r.begin(), adj_r.end()); for (size_t i = 0; i < adj_r.size(); ++i) { NodeID v = adj_r[i]; if (!usd[v]) { usd[v] = true; que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; vs.push_back(v); if (++ns == 64) break; } } for (EdgeWeight d = 0; que_t0 < que_h; ++d) { int num_sibling_es = 0, num_child_es = 0; for (int que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; eid++) { NodeID tv = graph.edges[eid]; EdgeWeight td = d + 1; if (d > tmp_d[tv]); else if (d == tmp_d[tv]) { if (v < tv) { sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; } } else { if (tmp_d[tv] == INF_WEIGHT) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; } } } for (int i = 0; i < num_sibling_es; ++i) { int v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (int i = 0; i < num_child_es; ++i) { int v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (NodeID v = 0; v < numOfVertices; ++v) { index_[inv[v]].bpspt_d[i_bpspt] = tmp_d[v]; index_[inv[v]].bpspt_s[i_bpspt][0] = tmp_s[v].first; index_[inv[v]].bpspt_s[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; } } } cout << "Building normal labels" << endl; for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for 
(EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]]; // Prefetch _mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0); _mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0); if (usd[v]) continue; for (int i = 0; i < kNumBitParallelRoots; ++i) { EdgeWeight td = idx_r.bpspt_d[i] + idx_v.bpspt_d[i]; if (td - 2 <= d) { /*td += (idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : ((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | (idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) ? -1 : 0;*/ td += (idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : ((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | (idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) ? -1 : 0; if (td <= d) goto pruned; } } for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { //pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); //iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); //index_[inv[v]].spt_v.resize(k); //index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID)); index_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight)); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } } BPL(Graph &graph, Ordering &orders, bool directed_flags) { //bplabels = BPLabel(kNumBitParallelRoots); //kNumBitParallelRoots = 64; //bplabels.setParas(kNumBitParallelRoots); // iteration_generated.resize(numOfVertices); // pruning_power.resize(numOfVertices); if (directed_flags == false) return; index_t_bp<kNumBitParallelRoots>*& index_ = dbplabels.index_bp; index_t_bp<kNumBitParallelRoots>*& bindex_ = dbplabels.bindex_bp; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<bool> r_usd(numOfVertices, false); // vector<bool> bp_usd(numOfVertices, false); // vector<bool> r_bp_usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward 
labels of root. { vector<EdgeWeight> tmp_d(numOfVertices); vector<std::pair<uint64_t, uint64_t> > tmp_s(numOfVertices); vector<EdgeWeight> r_tmp_d(numOfVertices); vector<std::pair<uint64_t, uint64_t> > r_tmp_s(numOfVertices); vector<NodeID> que(numOfVertices); vector<std::pair<NodeID, NodeID> > sibling_es(numOfEdges); vector<std::pair<NodeID, NodeID> > child_es(numOfEdges); vector<std::pair<NodeID, NodeID> > r_sibling_es(numOfEdges); vector<std::pair<NodeID, NodeID> > r_child_es(numOfEdges); cout << "Building BP labels" << endl; index_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>)); bindex_ = (index_t_bp<kNumBitParallelRoots>*)memalign(64, numOfVertices * sizeof(index_t_bp<kNumBitParallelRoots>)); //for (NodeID v = 0; v < numOfVertices; ++v) { // index_t_bp &idx = index_[v]; // idx.bpspt_d = (EdgeWeight*)memalign(64, kNumBitParallelRoots * sizeof(EdgeWeight)); // idx.bpspt_s = (uint64_t*)memalign(64, kNumBitParallelRoots * 2 * sizeof(uint64_t)); // /*for (int i = 0; i < kNumBitParallelRoots; ++i) { // idx.bpspt_s[i] = (uint64_t*)memalign(64, 2 * sizeof(uint64_t)); // }*/ //} int r = 0; for (int i_bpspt = 0; i_bpspt < kNumBitParallelRoots; ++i_bpspt) { while (r < numOfVertices && usd[r] ) ++r; if (r == numOfVertices) { for (NodeID v = 0; v < numOfVertices; ++v) { index_[v].bpspt_d[i_bpspt] = INF_WEIGHT; bindex_[v].bpspt_d[i_bpspt] = INF_WEIGHT; } continue; } r_usd[r] = true; //r_bp_usd[r] = true; //forward search fill(r_tmp_d.begin(), r_tmp_d.end(), INF_WEIGHT); fill(r_tmp_s.begin(), r_tmp_s.end(), std::make_pair(0, 0)); int que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; r_tmp_d[r] = 0; que_t1 = que_h; int ns = 0; vector<int> vs; vector<NodeID> adj_r(graph.vertices[r + 1] - graph.vertices[r]); for (EdgeID eid = graph.vertices[r]; eid < graph.vertices[r + 1]; eid++) { adj_r[eid - graph.vertices[r]] = graph.edges[eid]; } sort(adj_r.begin(), adj_r.end()); vector<NodeID> r_adj_r(graph.r_vertices[r + 1] - graph.r_vertices[r]); for (EdgeID eid = graph.r_vertices[r]; eid < graph.r_vertices[r + 1]; eid++) { r_adj_r[eid - graph.r_vertices[r]] = graph.r_edges[eid]; } sort(r_adj_r.begin(), r_adj_r.end()); vector<NodeID> common_adj; set_intersection(adj_r.begin(), adj_r.end(), r_adj_r.begin(), r_adj_r.end(), back_inserter(common_adj)); sort(common_adj.begin(), common_adj.end()); for (size_t i = 0; i < common_adj.size(); ++i) { NodeID v = common_adj[i]; if (!r_usd[v]) { r_usd[v] = true; que[que_h++] = v; r_tmp_d[v] = 1; r_tmp_s[v].first = 1ULL << ns; vs.push_back(v); if (++ns == 64) break; } } for (EdgeWeight d = 0; que_t0 < que_h; ++d) { int num_sibling_es = 0, num_child_es = 0; for (int que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; eid++) { NodeID tv = graph.edges[eid]; EdgeWeight td = d + 1; if (d > r_tmp_d[tv]); else if (d == r_tmp_d[tv]) { // if (v < tv) { r_sibling_es[num_sibling_es].first = v; r_sibling_es[num_sibling_es].second = tv; ++num_sibling_es; //} } else { if (r_tmp_d[tv] == INF_WEIGHT) { que[que_h++] = tv; r_tmp_d[tv] = td; } r_child_es[num_child_es].first = v; r_child_es[num_child_es].second = tv; ++num_child_es; } } } for (int i = 0; i < num_sibling_es; ++i) { int v = r_sibling_es[i].first, w = r_sibling_es[i].second; //r_tmp_s[v].second |= r_tmp_s[w].first; r_tmp_s[w].second |= r_tmp_s[v].first; } for (int i = 0; i < num_child_es; ++i) { int v = r_child_es[i].first, c = r_child_es[i].second; r_tmp_s[c].first |= r_tmp_s[v].first; 
r_tmp_s[c].second |= r_tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (NodeID v = 0; v < numOfVertices; ++v) { bindex_[inv[v]].bpspt_d[i_bpspt] = r_tmp_d[v]; bindex_[inv[v]].bpspt_s[i_bpspt][0] = r_tmp_s[v].first; bindex_[inv[v]].bpspt_s[i_bpspt][1] = r_tmp_s[v].second & ~r_tmp_s[v].first; } //forward search end //backward usd[r] = true; fill(tmp_d.begin(), tmp_d.end(), INF_WEIGHT); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; ns = 0; vs.clear(); for (size_t i = 0; i < common_adj.size(); ++i) { NodeID v = common_adj[i]; if (!usd[v]) { usd[v] = true; que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; vs.push_back(v); if (++ns == 64) break; } } for (EdgeWeight d = 0; que_t0 < que_h; ++d) { int num_sibling_es = 0, num_child_es = 0; for (int que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; for (EdgeID eid = graph.r_vertices[v]; eid < graph.r_vertices[v + 1]; eid++) { NodeID tv = graph.r_edges[eid]; EdgeWeight td = d + 1; if (d > tmp_d[tv]); else if (d == tmp_d[tv]) { //if (v < tv) { sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; //} } else { if (tmp_d[tv] == INF_WEIGHT) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; } } } for (int i = 0; i < num_sibling_es; ++i) { int v = sibling_es[i].first, w = sibling_es[i].second; //tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (int i = 0; i < num_child_es; ++i) { int v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (NodeID v = 0; v < numOfVertices; ++v) { index_[inv[v]].bpspt_d[i_bpspt] = tmp_d[v]; index_[inv[v]].bpspt_s[i_bpspt][0] = tmp_s[v].first; index_[inv[v]].bpspt_s[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; } } } cout << "Building normal labels" << endl; //for (size_t r = 0; r < numOfVertices; ++r) { // if (usd[r]) continue; // const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; // index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]]; // for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { // dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; // } // NodeID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // vis[r] = true; // que_t1 = que_h; // for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { // for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { // NodeID v = que[que_i]; // pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; // index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]]; // // Prefetch // _mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0); // _mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0); // _mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0); // _mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0); // if (usd[v]) continue; // for (int i = 0; i < kNumBitParallelRoots; ++i) { // EdgeWeight td = idx_r.bpspt_d[i] + idx_v.bpspt_d[i]; // if (td - 2 <= d) { // td += // (idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : // ((idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | // (idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) // ? 
-1 : 0; // if (td <= d) goto pruned; // } // } // for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { // NodeID w = tmp_idx_v.first[i]; // EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; // if (td <= d) { // //pruning_power[w]++; // goto pruned; // } // } // // Traverse // tmp_idx_v.first.back() = r; // tmp_idx_v.second.back() = d; // tmp_idx_v.first.push_back(numOfVertices); // tmp_idx_v.second.push_back(INF_WEIGHT); // //iteration_generated[r]++; // /*for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i];*/ // for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { // NodeID w = graph.edges[eid]; // if (!vis[w]) { // que[que_h++] = w; // vis[w] = true; // } // } // pruned: // {} // } // que_t0 = que_t1; // que_t1 = que_h; // } // for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; // for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) // dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // usd[r] = true; //} for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; index_t_bp<kNumBitParallelRoots> &idx_r = index_[inv[r]]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; index_t_bp<kNumBitParallelRoots> &r_idx_v = bindex_[inv[v]]; //index_t &idx_v = index_[inv[v]]; // Prefetch _mm_prefetch(&r_idx_v.bpspt_d[0], _MM_HINT_T0); _mm_prefetch(&r_idx_v.bpspt_s[0][0], _MM_HINT_T0); _mm_prefetch(&r_tmp_idx_v.first[0], _MM_HINT_T0); _mm_prefetch(&r_tmp_idx_v.second[0], _MM_HINT_T0); if (usd[v]) continue; for (int i = 0; i < kNumBitParallelRoots; ++i) { EdgeWeight td = idx_r.bpspt_d[i] + r_idx_v.bpspt_d[i]; if (td - 2 <= d) { td += (idx_r.bpspt_s[i][0] & r_idx_v.bpspt_s[i][0]) ? -2 : ((idx_r.bpspt_s[i][0] & r_idx_v.bpspt_s[i][1]) | (idx_r.bpspt_s[i][1] & r_idx_v.bpspt_s[i][0])) ? -1 : 0; if (td <= d) goto pruned_forward; } } // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= d) { goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid) { NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // Backward search. // Initialize backward labels of r. 
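			// Backward pass with the roles swapped: prune with the backward BP labels of r
			// (r_idx_r) against the forward BP labels of v (idx_v), then r's backward
			// distance labels (r_dst_r) against v's forward set (tmp_idx_v).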
const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; index_t_bp<kNumBitParallelRoots> &r_idx_r = bindex_[inv[r]]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; index_t_bp<kNumBitParallelRoots> &idx_v = index_[inv[v]]; //index_t &idx_v = index_[inv[v]]; _mm_prefetch(&idx_v.bpspt_d[0], _MM_HINT_T0); _mm_prefetch(&idx_v.bpspt_s[0][0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.first[0], _MM_HINT_T0); _mm_prefetch(&tmp_idx_v.second[0], _MM_HINT_T0); if (usd[v]) continue; for (int i = 0; i < kNumBitParallelRoots; ++i) { EdgeWeight td = r_idx_r.bpspt_d[i] + idx_v.bpspt_d[i]; if (td - 2 <= d) { /*td += (r_idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][0]) ? -2 : ((r_idx_r.bpspt_s[i][0] & idx_v.bpspt_s[i][1]) | (r_idx_r.bpspt_s[i][1] & idx_v.bpspt_s[i][0])) ? -1 : 0;*/ td += ( idx_v.bpspt_s[i][0]) & r_idx_r.bpspt_s[i][0] ? -2 : ((idx_v.bpspt_s[i][1] & r_idx_r.bpspt_s[i][0]) | (idx_v.bpspt_s[i][0] & r_idx_r.bpspt_s[i][1])) ? -1 : 0; if (td <= d) goto pruned_backward; } } // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= d) { goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = graph.r_vertices[v]; eid < graph.r_vertices[v + 1]; ++eid) { NodeID w = graph.r_edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); //index_[inv[v]].spt_v.resize(k); //index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID)); index_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight)); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); //index_[inv[v]].spt_v.resize(k); //index_[inv[v]].spt_d.resize(k); bindex_[inv[v]].spt_v = (NodeID*)memalign(64, k * sizeof(NodeID)); bindex_[inv[v]].spt_d = (EdgeWeight*)memalign(64, k * sizeof(EdgeWeight)); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); } } }; class PL_W : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; PL_W(WGraph &wgraph, Ordering &orders) { 
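		// Weighted graphs: the level-synchronous BFS is replaced by a pruned Dijkstra
		// per root (benchmark::heap), so labels carry true shortest-path distances
		// rather than hop counts.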
iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; // vector<vector<NodeID> > &adj = wgraph.adj; // vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight; vector<EdgeID>& vertices = wgraph.vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); long pop = 0; double hsize = 0; for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; //fprintf(stderr,"%d\n",r); const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } pqueue.update(r, 0); //vis[r] = true; long max_heap_size = 0; long heap_size = 1; while (!pqueue.empty()) { pop++; heap_size--; NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; // for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i]; // EdgeWeight w_d = adj_weight[v][i] + v_d; for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] == INF_WEIGHT) { heap_size++; if (max_heap_size < heap_size) max_heap_size = heap_size; } if( distances[w] > w_d ){ pqueue.update(w, w_d); distances[w] = w_d; } } } pruned: {} } hsize = hsize + max_heap_size; while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; pqueue.clear(vis_v); } pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } //cout << "total pop:" << pop << endl; //cout << "heap size:" << (double)hsize / (double)numOfVertices << endl; double count = 0; for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); count = count + k - 1; index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } cout << "Average Label Size:" << count / numOfVertices << endl; } PL_W(WGraph &wgraph, Ordering &orders, bool DIRECTED, bool PATH_QUERY) { // Generating Path Labels iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = plabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; // vector<vector<NodeID> > 
&adj = wgraph.adj; // vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight; vector<EdgeID>& vertices = wgraph.vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<NodeID> > > tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); vector<NodeID> parents(numOfVertices, numOfVertices); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } parents[r] = inv[r]; pqueue.update(r, 0); //vis[r] = true; while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_parent_v.first.back() = r; tmp_idx_parent_v.second.back() = parents[v]; tmp_idx_parent_v.first.push_back(numOfVertices); tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; // for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i]; // EdgeWeight w_d = adj_weight[v][i] + v_d; for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; parents[w] = inv[v]; } } } pruned: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; parents[vis_v] = numOfVertices; pqueue.clear(vis_v); } pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parent[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx_parent[v].first.clear(); tmp_idx_parent[v].second.clear(); } } PL_W(WGraph &wgraph, Ordering &orders, bool D_FLAGS) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /*vector<vector<NodeID> > &adj = wgraph.adj; vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;*/ 
vector<index_t>& bindex_ = dlabels.bindex_;/* vector<vector<NodeID> > &r_adj = wgraph.r_adj; vector<vector<EdgeWeight> > &r_adj_weight = wgraph.r_adj_weight;*/ //Array Representation vector<EdgeID>& vertices = wgraph.vertices; vector<EdgeID>& r_vertices = wgraph.r_vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<NodeEdgeWeightPair>& r_edges = wgraph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); // Forward search from r. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } pqueue.update(r, 0); //vis[r] = true; while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = v_d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /* for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i]; EdgeWeight w_d = adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; } } } pruned_forward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; //pqueue.clear(vis_v); } //pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // Backward search from r. 
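/* Backward search: traverse the reverse arrays (r_vertices / r_edges)
   from r and extend the forward labels tmp_idx of the settled vertices,
   pruning against r's backward labels cached in r_dst_r. */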
const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } pqueue.update(r, 0); while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i]; EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid].first; EdgeWeight w_d = r_edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; } } } pruned_backward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; //pqueue.clear(vis_v); } // pqueue.clear_n(); for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); } } PL_W(WGraph &wgraph, Ordering &orders, bool D_FLAGS, bool PATH_QUERY, bool dwpath) { iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t_path>& index_ = dplabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /*vector<vector<NodeID> > &adj = wgraph.adj; vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;*/ vector<index_t_path>& bindex_ = dplabels.bindex_;/* vector<vector<NodeID> > &r_adj = wgraph.r_adj; vector<vector<EdgeWeight> > &r_adj_weight = wgraph.r_adj_weight;*/ //Array Representation vector<EdgeID>& vertices = wgraph.vertices; vector<EdgeID>& r_vertices = wgraph.r_vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<NodeEdgeWeightPair>& r_edges = wgraph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<NodeID> parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<NodeID> > > 
tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); vector<NodeID> r_parents(numOfVertices, numOfVertices); vector<pair<vector<NodeID>, vector<NodeID> > > r_tmp_idx_parent(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<NodeID>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); // Forward search from r. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } pqueue.update(r, 0); distances[r] = 0; parents[r] = inv[r]; //vis[r] = true; while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &r_tmp_idx_parent_v = r_tmp_idx_parent[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = v_d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; r_tmp_idx_parent_v.first.back() = r; r_tmp_idx_parent_v.second.back() = parents[v]; r_tmp_idx_parent_v.first.push_back(numOfVertices); r_tmp_idx_parent_v.second.push_back(numOfVertices); /* for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i]; EdgeWeight w_d = adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { parents[w] = inv[v]; pqueue.update(w, w_d); distances[w] = w_d; } } } pruned_forward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; parents[vis_v] = numOfVertices; //pqueue.clear(vis_v); } //pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; // Backward search from r. 
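/* Path-preserving directed variant: parents[] / r_parents[] record the
   predecessor of each settled vertex on the shortest-path tree of r; the
   values end up in spt_p so that actual paths can be unfolded at query
   time, not just distances. */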
const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } pqueue.update(r, 0); r_parents[r] = inv[r]; while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<NodeID> > &tmp_idx_parent_v = tmp_idx_parent[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if (td <= v_d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_parent_v.first.back() = r; tmp_idx_parent_v.second.back() = r_parents[v]; tmp_idx_parent_v.first.push_back(numOfVertices); tmp_idx_parent_v.second.push_back(numOfVertices); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i]; EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid].first; EdgeWeight w_d = r_edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; r_parents[w] = inv[v]; } } } pruned_backward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; r_parents[vis_v] = numOfVertices; //pqueue.clear(vis_v); } // pqueue.clear_n(); for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); index_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_p[i] = tmp_idx_parent[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); tmp_idx_parent[v].first.clear(); tmp_idx_parent[v].second.clear(); tmp_idx_parent[v].first.shrink_to_fit(); tmp_idx_parent[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); bindex_[inv[v]].spt_p.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_p[i] = r_tmp_idx_parent[v].second[i]; r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); r_tmp_idx_parent[v].first.clear(); r_tmp_idx_parent[v].second.clear(); r_tmp_idx_parent[v].first.shrink_to_fit(); r_tmp_idx_parent[v].second.shrink_to_fit(); } } }; class CPL : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; bool SECOND_LEVEL = false; long children_size; long r_children_size; class nodeid_vector_hasher { public: std::size_t operator()(std::pair<NodeID, std::vector<NodeID> > const& pairvec) const { std::size_t seed = pairvec.second.size(); seed ^= 
pairvec.first + 0x9e3779b9 + (seed << 6) + (seed >> 2); for(auto& i : pairvec.second) { seed ^= i + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; } }; //using boost::hash_combine template <class T> inline void hash_combine(std::size_t& seed, T const& v) { seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } NodeID convertlist(NodeID& token_id, vector<token_t>& tokens_list, vector<vector<NodeID> >& tmp_tokens, vector<vector<EdgeWeight> >& tmp_tokens_distances, unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher>& token_map, pair<vector<NodeID>, vector<EdgeWeight> > & tmp_idx_v, pair<vector<NodeID>, vector<NodeID> >& tmp_idx_token_parents_v, bool REVERSE){ NodeID lsize = tmp_idx_v.first.size(); NodeID anchorid = tmp_idx_v.first[lsize-2]; for(NodeID i = 0; i < lsize - 1; ++i){ //Add to parent_tree NodeID h = tmp_idx_v.first[i]; NodeID hparent = tmp_idx_token_parents_v.first[i]; EdgeWeight hparent_dis = tmp_idx_token_parents_v.second[i]; NodeID tid = h; // non-trival tokens if(tmp_tokens[h].size() != 0){ vector<NodeID>& tmp_token_h = tmp_tokens[h]; //string tstring = token_string(h, tmp_token_h); vector<EdgeWeight>& tmp_tokens_distances_h = tmp_tokens_distances[h]; pair<NodeID, vector<NodeID> > token_key = make_pair(h, tmp_token_h); // New token if(token_map.find(token_key) == token_map.end()){ token_map[token_key] = token_id; token_t new_token; NodeID csize = tmp_token_h.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); new_token.sptc_v[0] = h; new_token.sptc_d[0] = csize; if(REVERSE) r_children_size += (csize + 1); else children_size += (csize + 1); for(NodeID j = 0; j < csize; ++j){ new_token.sptc_v[j+1] = tmp_token_h[j]; new_token.sptc_d[j+1] = tmp_tokens_distances_h[j]; } tokens_list.push_back(new_token); tid = token_id; token_id++; }else // Already exist tid = token_map[token_key]; } //trival tokens if(i == lsize - 2) anchorid = tid; if(hparent!=numOfVertices){ tmp_tokens[hparent].push_back(tid); tmp_tokens_distances[hparent].push_back(hparent_dis); } } return anchorid; } void converttokens(vector<pair<vector<NodeID>, vector<EdgeWeight> > > & tmp_idx, vector<pair<vector<NodeID>, vector<NodeID> > >& tmp_idx_token_parents, bool REVERSE){ vector<token_t> tokens_list; vector<token_t> r_tokens_list; vector<vector<NodeID> > tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > tmp_tokens_distances(numOfVertices); vector<vector<NodeID> > r_tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > r_tmp_tokens_distances(numOfVertices); unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> token_map; unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> r_token_map; if(REVERSE == false) children_size = 0; else r_children_size = 0; NodeID token_id = numOfVertices; NodeID r_token_id = numOfVertices; if(REVERSE == false) clabels.anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); else clabels.r_anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ if(REVERSE == false) clabels.anchor_p[v] = convertlist(token_id, tokens_list, tmp_tokens, tmp_tokens_distances, token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); else clabels.r_anchor_p[v] = convertlist(r_token_id, r_tokens_list, r_tmp_tokens, r_tmp_tokens_distances, r_token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); 
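/* convertlist() filled the scratch buffers tmp_tokens[h] /
   tmp_tokens_distances[h] for every hub h acting as a token parent in
   v's label list; the loops below clear exactly those buckets so the
   scratch arrays are empty again for the next vertex. */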
//if(REVERSE == true) // validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.r_anchor_p[v], que); if(REVERSE == false){ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(tmp_tokens[hparent].empty() == false){ tmp_tokens[hparent].clear(); tmp_tokens_distances[hparent].clear(); } } } }else{ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(r_tmp_tokens[hparent].empty() == false){ r_tmp_tokens[hparent].clear(); r_tmp_tokens_distances[hparent].clear(); } } } } } //vector<vector<bool> > one_level(tokens_list.size()); if(REVERSE == false){ clabels.numOfTokens = tokens_list.size(); clabels.tokenindex_p = (token_t*)memalign(64, clabels.numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } }else{ clabels.r_numOfTokens = r_tokens_list.size(); clabels.r_tokenindex_p = (token_t*)memalign(64, clabels.r_numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } } if(REVERSE == false){ if(SECOND_LEVEL){ vector<token_t> supertokens(numOfVertices); convertsupertokens(tokens_list, supertokens); clabels.supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < supertokens.size(); ++i){ clabels.supertokenindex_p[i] = supertokens[i]; } for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } //convertsupertokens(clabels.tokenindex_p, clabels.supertokenindex_p, clabels.numOfTokens); //vector<NodeID> que(numOfVertices, numOfVertices); //for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { //cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list,supertokens, clabels.anchor_p[v], que); //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.supertokenindex_p, clabels.anchor_p[v], que); //} //} } }else{ if(SECOND_LEVEL){ //convertsupertokens(clabels.r_tokenindex_p, clabels.r_supertokenindex_p, clabels.r_numOfTokens); vector<token_t> r_supertokens(numOfVertices); convertsupertokens(r_tokens_list, r_supertokens); clabels.r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < r_supertokens.size(); ++i){ clabels.r_supertokenindex_p[i] = r_supertokens[i]; } for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } /* //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, r_supertokens, clabels.r_anchor_p[v], que); validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, clabels.r_supertokenindex_p, clabels.r_anchor_p[v], que); //} }*/ } } clabels.total_children = children_size; } void convertsupertokens(vector<token_t>& tokens_list, vector<token_t>& supertokens){ vector<unordered_map<NodeID, NodeID> > sorted_sp(numOfVertices); vector<unordered_map<NodeID, EdgeWeight> > dis_sp(numOfVertices); NodeID total_supertoken_children = 0; for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; unordered_map<NodeID, NodeID>& rc_map = 
sorted_sp[r]; unordered_map<NodeID, EdgeWeight>& rd_map = dis_sp[r]; for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; rc_map[cid]++; rd_map[cid] = token.sptc_d[i + 1]; } } //supertokens.resize(numOfVertices); // Creating super tokens, sorting the children based on frequency for(NodeID v = 0; v < numOfVertices; ++v){ vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& vc_map = sorted_sp[v]; unordered_map<NodeID, EdgeWeight>& vd_map = dis_sp[v]; for(unordered_map<NodeID, NodeID>::iterator it = vc_map.begin(); it != vc_map.end(); ++it){ sorted_tmp.push_back(make_pair((*it).second, (*it).first)); } sort(sorted_tmp.rbegin(), sorted_tmp.rend()); token_t new_token; EdgeWeight csize = sorted_tmp.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); new_token.sptc_v[0] = csize; new_token.sptc_d[0] = ceil((double)ceil((double)csize / (double)8) / (double)8); for(NodeID i = 0; i < csize; ++i){ NodeID cid = sorted_tmp[i].second; new_token.sptc_v[i + 1] = cid; new_token.sptc_d[i + 1] = vd_map[cid]; } supertokens[v] = new_token; total_supertoken_children += csize; } // Converting each tokens to supertokens vector<bool> isChild(numOfVertices + tokens_list.size(), false); for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; if(csize == 0) continue; /* vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; */ for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = true; } //sort(sorted_tmp.rbegin(), sorted_tmp.rend()); const token_t& supertoken_r = supertokens[r]; //vector<bool>& one_level_t = one_level[t]; //NodeID second_level_length = ceil((double)supertoken_r.sptc_d[0] / (double)8); NodeID first_level_length = ceil((double)supertoken_r.sptc_v[0] / (double)8); //cout << first_level_length << "," << second_level_length << endl; vector<bool> first_level_bv(first_level_length); vector<bool> second_level_bv; // NodeID t1 = 0; NodeID t2 = 1; // NodeID tt = sorted_tmp[t1].second; NodeID st = supertoken_r.sptc_v[t2]; // NodeID ctsize = sorted_tmp.size(); NodeID stsize = supertoken_r.sptc_v[0]; //one_level_t.resize(stsize, false); vector<bool> tmp_set(8, false); for(NodeID i = 0; i < first_level_length; ++i){ NodeID in_batch = false; //if(t1 != ctsize && t2 != stsize) fill(tmp_set.begin(), tmp_set.end(), false); for(NodeID j = 0; j < 8; ++j){ // if(t1 == ctsize) break; if(t2 == (stsize + 1)) break; // tt = sorted_tmp[t1].second; st = supertoken_r.sptc_v[t2]; if(isChild[st]){ tmp_set[j] = true; in_batch = true; } t2++; } if(in_batch == false) first_level_bv[i] = false; else{ first_level_bv[i] = true; for(NodeID j = 0; j < 8; ++j) second_level_bv.push_back(tmp_set[j]); } } for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = false; } NodeID first_level_int_length = ceil((double)first_level_length/(double)8); // bytes NodeID second_level_int_length = ceil((double)second_level_bv.size() / (double)8); // bytes // if(t < 10) cout << "sl:" << r << "," << second_level_int_length << " vs " << second_level_bv.size() << " vs " << token.sptc_d[0] << ";" << stsize << " vs " << first_level_length << endl; //supertoken_r.sptc_v[0] = stsize; //how many children for this supertoken //supertoken_r.sptc_d[0] = first_level_int_length; //how many uchar to store for this token referring 
to this supertoken = stsize / 8 / 8 token.sptc_d[0] = second_level_int_length; //how many uchar to store for this token in second level // convert first_level_bv -> uint8_t* sptc_fbv // convert second_level_bv -> uint8_t* sptc_sbv // first_level_bv % 8 == 0; // second_level_bv % 8 == 0; token.sptc_fbv = (unsigned char*)memalign(64, first_level_int_length * sizeof(unsigned char)); token.sptc_sbv = (unsigned char*)memalign(64, second_level_int_length * sizeof(unsigned char)); // cout << "first:" << endl; for(NodeID i = 0; i < first_level_int_length; ++i){ token.sptc_fbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_fbv[i] = token.sptc_fbv[i] << 1; if(first_level_bv[i * 8 + j]) ++token.sptc_fbv[i]; } /* bitset<8> x(token.sptc_fbv[i]); cout << x << endl; for(NodeID j = 0; j < 8; ++j){ if(first_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } cout << endl; */ } //cout << endl; //cout << "second:" << endl; for(NodeID i = 0; i < second_level_int_length; ++i){ token.sptc_sbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_sbv[i] = token.sptc_sbv[i] << 1; if(second_level_bv[i * 8 + j]) ++token.sptc_sbv[i]; } // bitset<8> x(token.sptc_sbv[i]); // cout << x ; /* for(NodeID j = 0; j < 8; ++j){ if(second_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } */ // cout << ","; } //cout << endl; } cout << " Number of Supertokens: " << supertokens.size() << endl; cout << " Average Children of Supertokens: " << (double)total_supertoken_children / (double) supertokens.size() << endl; } CPL(Graph &graph, Ordering &orders, bool slevel) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; //vector<vector<NodeID> > &adj = graph.adj; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if(tmp_idx_v.second[i] == d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); 
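/* Every label list keeps a sentinel (numOfVertices, INF_WEIGHT) at its
   back: the new hub (r, d) overwrites the old sentinel and a fresh one
   is pushed. dst_r has numOfVertices + 1 slots so the sentinel id always
   reads INF_WEIGHT during the pruning scan. */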
tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ for (EdgeID eid = graph.vertices[v]; eid < graph.vertices[v + 1]; ++eid){ NodeID w = graph.edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } converttokens(tmp_idx, tmp_idx_token_parents, false); vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } /* for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } */ } CPL(Graph &graph, Ordering &orders, bool slevel, bool D_FLAGS) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<index_t>& bindex_ = dlabels.bindex_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<vector<NodeID> > &adj = graph.adj; vector<vector<NodeID> > &r_adj = graph.r_adj;*/ // Array Representation vector<EdgeID>& vertices = graph.vertices; vector<EdgeID>& r_vertices = graph.r_vertices; vector<NodeID>& edges = graph.edges; vector<NodeID>& r_edges = graph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); // Backward labels. vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<NodeID> que(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // Forward labels of root. vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); // Backward labels of root. for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; // Forward search. // Initialize forward labels of r. 
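/* dst_r caches r's forward labels and r_dst_r its backward labels as
   dense arrays for O(1) lookups in the pruning tests; both are reset to
   INF_WEIGHT before moving on to the next root. */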
const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_token_parents_v = r_tmp_idx_token_parents[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the forward labels of r and backward labels of v in the forward search from r when reaching v. for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if(r_tmp_idx_v.second[i] == d + r_dst_r[w] && r_tmp_idx_token_parents_v.first[i] == numOfVertices){ r_tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); r_tmp_idx_token_parents_v.second[i] = r_dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); r_tmp_idx_token_parents_v.first.back() = numOfVertices; r_tmp_idx_token_parents_v.second.back() = INF_WEIGHT; r_tmp_idx_token_parents_v.first.push_back(numOfVertices); r_tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i];*/ // Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid){ NodeID w = edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_forward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; // Backward search. // Initialize backward labels of r. que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; vis[r] = true; que_t1 = que_h; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; //index_t &idx_v = index_[inv[v]]; if (usd[v]) continue; // Pruned by the backward labels of r and forward labels of v in the backward search from r when reaching v (v->r path). 
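/* Pruning rule of the backward pass: if some hub w common to v's forward
   labels and r's backward labels gives d(v,w) + d(w,r) <= d, the pair
   (v, r) is already covered and v contributes no new label entry. */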
for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if(tmp_idx_v.second[i] == d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i];*/ // Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid]; if (!vis[w]) { que[que_h++] = w; vis[w] = true; } } pruned_backward: {} } que_t0 = que_t1; que_t1 = que_h; } for (size_t i = 0; i < que_h; ++i) vis[que[i]] = false; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } converttokens(tmp_idx, tmp_idx_token_parents, false); //converttokens(tmp_idx, tmp_idx_token_parents, r_tmp_idx, r_tmp_idx_token_parents); cout << clabels.numOfTokens << " Tokens in total" << endl; cout << (double)children_size / (double) clabels.numOfTokens << " average children number" << endl; converttokens(r_tmp_idx, r_tmp_idx_token_parents, true); cout << clabels.r_numOfTokens << " Tokens in total" << endl; cout << (double)r_children_size / (double) clabels.r_numOfTokens << " average children number" << endl; vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.r_anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.r_anchor_p[v] = change_anchor[v]; } } }; class CPL_W : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; bool SECOND_LEVEL = false; long children_size; long r_children_size; class nodeid_vector_hasher { public: std::size_t operator()(std::pair<NodeID, std::vector<NodeID> > const& pairvec) const { std::size_t seed = pairvec.second.size(); seed ^= pairvec.first + 0x9e3779b9 + (seed << 6) + (seed >> 2); for(auto& i : pairvec.second) { seed ^= i + 0x9e3779b9 + (seed << 6) + (seed >> 2); } return seed; } }; //using boost::hash_combine template <class T> inline void hash_combine(std::size_t& seed, T const& v) { seed ^= std::hash<T>()(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2); } NodeID convertlist(NodeID& token_id, vector<token_t>& tokens_list, vector<vector<NodeID> >& tmp_tokens, vector<vector<EdgeWeight> >& tmp_tokens_distances, unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher>& token_map, pair<vector<NodeID>, vector<EdgeWeight> > & tmp_idx_v, pair<vector<NodeID>, vector<NodeID> >& tmp_idx_token_parents_v, bool REVERSE){ NodeID lsize = tmp_idx_v.first.size(); NodeID anchorid = tmp_idx_v.first[lsize-2]; for(NodeID i = 0; i < lsize - 1; ++i){ //Add to parent_tree NodeID h = tmp_idx_v.first[i]; NodeID 
hparent = tmp_idx_token_parents_v.first[i]; EdgeWeight hparent_dis = tmp_idx_token_parents_v.second[i]; NodeID tid = h; // non-trival tokens if(tmp_tokens[h].size() != 0){ vector<NodeID>& tmp_token_h = tmp_tokens[h]; //string tstring = token_string(h, tmp_token_h); vector<EdgeWeight>& tmp_tokens_distances_h = tmp_tokens_distances[h]; pair<NodeID, vector<NodeID> > token_key = make_pair(h, tmp_token_h); // New token if(token_map.find(token_key) == token_map.end()){ token_map[token_key] = token_id; token_t new_token; NodeID csize = tmp_token_h.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); new_token.sptc_v[0] = h; new_token.sptc_d[0] = csize; if(REVERSE) r_children_size += (csize + 1); else children_size += (csize + 1); for(NodeID j = 0; j < csize; ++j){ new_token.sptc_v[j+1] = tmp_token_h[j]; new_token.sptc_d[j+1] = tmp_tokens_distances_h[j]; } tokens_list.push_back(new_token); tid = token_id; token_id++; }else // Already exist tid = token_map[token_key]; } //trival tokens if(i == lsize - 2) anchorid = tid; if(hparent!=numOfVertices){ tmp_tokens[hparent].push_back(tid); tmp_tokens_distances[hparent].push_back(hparent_dis); } } return anchorid; } void converttokens(vector<pair<vector<NodeID>, vector<EdgeWeight> > > & tmp_idx, vector<pair<vector<NodeID>, vector<NodeID> > >& tmp_idx_token_parents, bool REVERSE){ vector<token_t> tokens_list; vector<token_t> r_tokens_list; vector<vector<NodeID> > tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > tmp_tokens_distances(numOfVertices); vector<vector<NodeID> > r_tmp_tokens(numOfVertices); vector<vector<EdgeWeight> > r_tmp_tokens_distances(numOfVertices); unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> token_map; unordered_map<pair<NodeID, vector<NodeID> >, NodeID, nodeid_vector_hasher> r_token_map; if(REVERSE == false) children_size = 0; else r_children_size = 0; NodeID token_id = numOfVertices; NodeID r_token_id = numOfVertices; if(REVERSE == false) clabels.anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); else clabels.r_anchor_p = (NodeID*)memalign(64, (numOfVertices)* sizeof(NodeID)); //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ if(REVERSE == false) clabels.anchor_p[v] = convertlist(token_id, tokens_list, tmp_tokens, tmp_tokens_distances, token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); else clabels.r_anchor_p[v] = convertlist(r_token_id, r_tokens_list, r_tmp_tokens, r_tmp_tokens_distances, r_token_map, tmp_idx[v], tmp_idx_token_parents[v], REVERSE); //if(REVERSE == true) // validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.r_anchor_p[v], que); if(REVERSE == false){ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(tmp_tokens[hparent].empty() == false){ tmp_tokens[hparent].clear(); tmp_tokens_distances[hparent].clear(); } } } }else{ NodeID lsize = tmp_idx[v].first.size(); for(NodeID i = 0; i < lsize - 1; ++i){ NodeID h = tmp_idx[v].first[i]; NodeID hparent = tmp_idx_token_parents[v].first[i]; if( hparent != numOfVertices ){ if(r_tmp_tokens[hparent].empty() == false){ r_tmp_tokens[hparent].clear(); r_tmp_tokens_distances[hparent].clear(); } } } } } //vector<vector<bool> > one_level(tokens_list.size()); if(REVERSE == false){ clabels.numOfTokens = tokens_list.size(); 
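/* Materialize the deduplicated token list as a flat 64-byte aligned
   array; ids below numOfVertices denote trivial single-hub tokens, while
   larger ids refer to entries of this array (offset by numOfVertices). */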
clabels.tokenindex_p = (token_t*)memalign(64, clabels.numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } }else{ clabels.r_numOfTokens = r_tokens_list.size(); clabels.r_tokenindex_p = (token_t*)memalign(64, clabels.r_numOfTokens * sizeof(token_t)); for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } } if(REVERSE == false){ if(SECOND_LEVEL){ vector<token_t> supertokens(numOfVertices); convertsupertokens(tokens_list, supertokens); clabels.supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < supertokens.size(); ++i){ clabels.supertokenindex_p[i] = supertokens[i]; } for(NodeID i = 0; i < clabels.numOfTokens; ++i){ clabels.tokenindex_p[i] = tokens_list[i]; } //convertsupertokens(clabels.tokenindex_p, clabels.supertokenindex_p, clabels.numOfTokens); //vector<NodeID> que(numOfVertices, numOfVertices); //for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { //cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list,supertokens, clabels.anchor_p[v], que); //validation(tmp_idx[v], tmp_idx_token_parents[v], tokens_list, clabels.supertokenindex_p, clabels.anchor_p[v], que); //} //} } }else{ if(SECOND_LEVEL){ //convertsupertokens(clabels.r_tokenindex_p, clabels.r_supertokenindex_p, clabels.r_numOfTokens); vector<token_t> r_supertokens(numOfVertices); convertsupertokens(r_tokens_list, r_supertokens); clabels.r_supertokenindex_p = (token_t*)memalign(64, numOfVertices * sizeof(token_t)); for(NodeID i = 0; i < r_supertokens.size(); ++i){ clabels.r_supertokenindex_p[i] = r_supertokens[i]; } for(NodeID i = 0; i < clabels.r_numOfTokens; ++i){ clabels.r_tokenindex_p[i] = r_tokens_list[i]; } /* //vector<NodeID> que(numOfVertices, numOfVertices); for(NodeID v = 0; v < numOfVertices; ++v){ //if(REVERSE) { cout << v << endl; //validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, r_supertokens, clabels.r_anchor_p[v], que); validation(tmp_idx[v], tmp_idx_token_parents[v], r_tokens_list, clabels.r_supertokenindex_p, clabels.r_anchor_p[v], que); //} }*/ } } clabels.total_children = children_size; } void convertsupertokens(vector<token_t>& tokens_list, vector<token_t>& supertokens){ vector<unordered_map<NodeID, NodeID> > sorted_sp(numOfVertices); vector<unordered_map<NodeID, EdgeWeight> > dis_sp(numOfVertices); NodeID total_supertoken_children = 0; for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; unordered_map<NodeID, EdgeWeight>& rd_map = dis_sp[r]; for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; rc_map[cid]++; rd_map[cid] = token.sptc_d[i + 1]; } } //supertokens.resize(numOfVertices); // Creating super tokens, sorting the children based on frequency for(NodeID v = 0; v < numOfVertices; ++v){ vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& vc_map = sorted_sp[v]; unordered_map<NodeID, EdgeWeight>& vd_map = dis_sp[v]; for(unordered_map<NodeID, NodeID>::iterator it = vc_map.begin(); it != vc_map.end(); ++it){ sorted_tmp.push_back(make_pair((*it).second, (*it).first)); } sort(sorted_tmp.rbegin(), sorted_tmp.rend()); token_t new_token; EdgeWeight csize = sorted_tmp.size(); new_token.sptc_v = (NodeID*)memalign(64, (csize + 1)* sizeof(NodeID)); new_token.sptc_d = (EdgeWeight*)memalign(64, (csize + 1) * sizeof(EdgeWeight)); 
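/* Slot 0 of a supertoken is a header: sptc_v[0] holds the child count
   and sptc_d[0] the byte length of the first-level bitmap, where
   children are grouped 8 per bit and bits are packed 8 per byte.
   Hypothetical example: csize = 100 children -> ceil(100/8) = 13
   first-level bits -> ceil(13/8) = 2 header bytes. */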
new_token.sptc_v[0] = csize; new_token.sptc_d[0] = ceil((double)ceil((double)csize / (double)8) / (double)8); for(NodeID i = 0; i < csize; ++i){ NodeID cid = sorted_tmp[i].second; new_token.sptc_v[i + 1] = cid; new_token.sptc_d[i + 1] = vd_map[cid]; } supertokens[v] = new_token; total_supertoken_children += csize; } // Converting each tokens to supertokens vector<bool> isChild(numOfVertices + tokens_list.size(), false); for(NodeID t = 0 ; t < tokens_list.size(); ++t){ token_t& token = tokens_list[t]; NodeID r = token.sptc_v[0]; EdgeWeight csize = token.sptc_d[0]; if(csize == 0) continue; /* vector<pair<NodeID, NodeID> > sorted_tmp; unordered_map<NodeID, NodeID>& rc_map = sorted_sp[r]; */ for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = true; } //sort(sorted_tmp.rbegin(), sorted_tmp.rend()); const token_t& supertoken_r = supertokens[r]; //vector<bool>& one_level_t = one_level[t]; //NodeID second_level_length = ceil((double)supertoken_r.sptc_d[0] / (double)8); NodeID first_level_length = ceil((double)supertoken_r.sptc_v[0] / (double)8); //cout << first_level_length << "," << second_level_length << endl; vector<bool> first_level_bv(first_level_length); vector<bool> second_level_bv; // NodeID t1 = 0; NodeID t2 = 1; // NodeID tt = sorted_tmp[t1].second; NodeID st = supertoken_r.sptc_v[t2]; // NodeID ctsize = sorted_tmp.size(); NodeID stsize = supertoken_r.sptc_v[0]; //one_level_t.resize(stsize, false); vector<bool> tmp_set(8, false); for(NodeID i = 0; i < first_level_length; ++i){ NodeID in_batch = false; //if(t1 != ctsize && t2 != stsize) fill(tmp_set.begin(), tmp_set.end(), false); for(NodeID j = 0; j < 8; ++j){ // if(t1 == ctsize) break; if(t2 == (stsize + 1)) break; // tt = sorted_tmp[t1].second; st = supertoken_r.sptc_v[t2]; if(isChild[st]){ tmp_set[j] = true; in_batch = true; } t2++; } if(in_batch == false) first_level_bv[i] = false; else{ first_level_bv[i] = true; for(NodeID j = 0; j < 8; ++j) second_level_bv.push_back(tmp_set[j]); } } for(EdgeWeight i = 0; i < csize; ++i){ NodeID cid = token.sptc_v[i + 1]; isChild[cid] = false; } NodeID first_level_int_length = ceil((double)first_level_length/(double)8); // bytes NodeID second_level_int_length = ceil((double)second_level_bv.size() / (double)8); // bytes // if(t < 10) cout << "sl:" << r << "," << second_level_int_length << " vs " << second_level_bv.size() << " vs " << token.sptc_d[0] << ";" << stsize << " vs " << first_level_length << endl; //supertoken_r.sptc_v[0] = stsize; //how many children for this supertoken //supertoken_r.sptc_d[0] = first_level_int_length; //how many uchar to store for this token referring to this supertoken = stsize / 8 / 8 token.sptc_d[0] = second_level_int_length; //how many uchar to store for this token in second level // convert first_level_bv -> uint8_t* sptc_fbv // convert second_level_bv -> uint8_t* sptc_sbv // first_level_bv % 8 == 0; // second_level_bv % 8 == 0; token.sptc_fbv = (unsigned char*)memalign(64, first_level_int_length * sizeof(unsigned char)); token.sptc_sbv = (unsigned char*)memalign(64, second_level_int_length * sizeof(unsigned char)); // cout << "first:" << endl; for(NodeID i = 0; i < first_level_int_length; ++i){ token.sptc_fbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_fbv[i] = token.sptc_fbv[i] << 1; if(first_level_bv[i * 8 + j]) ++token.sptc_fbv[i]; } /* bitset<8> x(token.sptc_fbv[i]); cout << x << endl; for(NodeID j = 0; j < 8; ++j){ if(first_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } cout << endl; */ } //cout << endl; //cout 
<< "second:" << endl; for(NodeID i = 0; i < second_level_int_length; ++i){ token.sptc_sbv[i] = 0; for(NodeID j = 0; j < 8; ++j){ token.sptc_sbv[i] = token.sptc_sbv[i] << 1; if(second_level_bv[i * 8 + j]) ++token.sptc_sbv[i]; } // bitset<8> x(token.sptc_sbv[i]); // cout << x ; /* for(NodeID j = 0; j < 8; ++j){ if(second_level_bv[i * 8 + j]) cout << "1"; else cout << "0"; } */ // cout << ","; } //cout << endl; } cout << " Number of Supertokens: " << supertokens.size() << endl; cout << " Average Children of Supertokens: " << (double)total_supertoken_children / (double) supertokens.size() << endl; } CPL_W(WGraph &wgraph, Ordering &orders, bool slevel) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; // vector<vector<NodeID> > &adj = wgraph.adj; // vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight; vector<EdgeID>& vertices = wgraph.vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); long pop = 0; double hsize = 0; for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } pqueue.update(r, 0); //vis[r] = true; long max_heap_size = 0; long heap_size = 1; while (!pqueue.empty()) { pop++; heap_size--; NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if(tmp_idx_v.second[i] == v_d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= v_d) { pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; // for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i]; // EdgeWeight w_d = adj_weight[v][i] + v_d; for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] == INF_WEIGHT) { heap_size++; if (max_heap_size < heap_size) max_heap_size = heap_size; } if( distances[w] > 
w_d ){ pqueue.update(w, w_d); distances[w] = w_d; } } } pruned: {} } hsize = hsize + max_heap_size; while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; pqueue.clear(vis_v); } pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } //cout << "total pop:" << pop << endl; //cout << "heap size:" << (double)hsize / (double)numOfVertices << endl; converttokens(tmp_idx, tmp_idx_token_parents, false); vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } /* double count = 0; for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); count = count + k - 1; index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); } cout << "Average Label Size:" << count / numOfVertices << endl; */ } CPL_W(WGraph &wgraph, Ordering &orders, bool slevel, bool D_FLAGS) { SECOND_LEVEL = slevel; iteration_generated.resize(numOfVertices); pruning_power.resize(numOfVertices); vector<index_t>& index_ = dlabels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /*vector<vector<NodeID> > &adj = wgraph.adj; vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight;*/ vector<index_t>& bindex_ = dlabels.bindex_;/* vector<vector<NodeID> > &r_adj = wgraph.r_adj; vector<vector<EdgeWeight> > &r_adj_weight = wgraph.r_adj_weight;*/ //Array Representation vector<EdgeID>& vertices = wgraph.vertices; vector<EdgeID>& r_vertices = wgraph.r_vertices; vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<NodeEdgeWeightPair>& r_edges = wgraph.r_edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx_token_parents(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, numOfVertices))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); // Forward search from r. 
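/* Weighted directed variant: as in the unweighted CPL case, each root
   runs a forward Dijkstra over (vertices, edges) and a backward Dijkstra
   over (r_vertices, r_edges), recording token parents for both label
   directions before the token conversion. */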
for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } const pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_r = r_tmp_idx[r]; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) { r_dst_r[r_tmp_idx_r.first[i]] = r_tmp_idx_r.second[i]; } pqueue.update(r, 0); //vis[r] = true; while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_token_parents_v = r_tmp_idx_token_parents[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < r_tmp_idx_v.first.size(); ++i) { NodeID w = r_tmp_idx_v.first[i]; EdgeWeight td = r_tmp_idx_v.second[i] + dst_r[w]; if(r_tmp_idx_v.second[i] == v_d + r_dst_r[w] && r_tmp_idx_token_parents_v.first[i] == numOfVertices){ r_tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); r_tmp_idx_token_parents_v.second[i] = r_dst_r[w]; } if (td <= v_d) { pruning_power[w]++; goto pruned_forward; } } // Traverse r_tmp_idx_v.first.back() = r; r_tmp_idx_v.second.back() = v_d; r_tmp_idx_v.first.push_back(numOfVertices); r_tmp_idx_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; r_tmp_idx_token_parents_v.first.back() = numOfVertices; r_tmp_idx_token_parents_v.second.back() = INF_WEIGHT; r_tmp_idx_token_parents_v.first.push_back(numOfVertices); r_tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); /* for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i]; EdgeWeight w_d = adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = vertices[v]; eid < vertices[v + 1]; ++eid) { NodeID w = edges[eid].first; EdgeWeight w_d = edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; } } } pruned_forward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; //pqueue.clear(vis_v); } //pqueue.clear_n(); // Backward search from r. 
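/* Backward Dijkstra from r over the reverse arrays; dst_r and r_dst_r
   stay populated across both passes and are only reset to INF_WEIGHT
   once the backward pass has finished. */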
pqueue.update(r, 0); while (!pqueue.empty()) { NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_token_parents_v = tmp_idx_token_parents[v]; vis[v] = true; visited_que.push(v); if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + r_dst_r[w]; if(tmp_idx_v.second[i] == v_d + dst_r[w] && tmp_idx_token_parents_v.first[i] == numOfVertices){ tmp_idx_token_parents_v.first[i] = r;//tmp_idx_token_parents_v.first.size(); tmp_idx_token_parents_v.second[i] = dst_r[w]; } if (td <= v_d) { pruning_power[w]++; goto pruned_backward; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); tmp_idx_token_parents_v.first.back() = numOfVertices; tmp_idx_token_parents_v.second.back() = INF_WEIGHT; tmp_idx_token_parents_v.first.push_back(numOfVertices); tmp_idx_token_parents_v.second.push_back(INF_WEIGHT); iteration_generated[r]++; /*for (size_t i = 0; i < r_adj[v].size(); ++i) { NodeID w = r_adj[v][i]; EdgeWeight w_d = r_adj_weight[v][i] + v_d;*/ //Array Representation for (EdgeID eid = r_vertices[v]; eid < r_vertices[v + 1]; ++eid) { NodeID w = r_edges[eid].first; EdgeWeight w_d = r_edges[eid].second + v_d; if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; } } } pruned_backward: {} } while (!visited_que.empty()) { NodeID vis_v = visited_que.front(); visited_que.pop(); vis[vis_v] = false; distances[vis_v] = INF_WEIGHT; //pqueue.clear(vis_v); } // pqueue.clear_n(); for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) dst_r[tmp_idx_r.first[i]] = INF_WEIGHT; for (size_t i = 0; i < r_tmp_idx_r.first.size(); ++i) r_dst_r[r_tmp_idx_r.first[i]] = INF_WEIGHT; usd[r] = true; } converttokens(tmp_idx, tmp_idx_token_parents, false); //converttokens(tmp_idx, tmp_idx_token_parents, r_tmp_idx, r_tmp_idx_token_parents); cout << clabels.numOfTokens << " Tokens in total" << endl; cout << (double)children_size / (double) clabels.numOfTokens << " average children number" << endl; converttokens(r_tmp_idx, r_tmp_idx_token_parents, true); cout << clabels.r_numOfTokens << " Tokens in total" << endl; cout << (double)r_children_size / (double) clabels.r_numOfTokens << " average children number" << endl; vector<NodeID> change_anchor(numOfVertices); for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.anchor_p[v] = change_anchor[v]; } for (size_t v = 0; v < numOfVertices; ++v) { change_anchor[inv[v]] = clabels.r_anchor_p[v]; } for (size_t v = 0; v < numOfVertices; ++v) { clabels.r_anchor_p[v] = change_anchor[v]; } /* for (size_t v = 0; v < numOfVertices; ++v) { NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; tmp_idx[v].first.clear(); tmp_idx[v].second.clear(); tmp_idx[v].first.shrink_to_fit(); tmp_idx[v].second.shrink_to_fit(); k = r_tmp_idx[v].first.size(); bindex_[inv[v]].spt_v.resize(k); bindex_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i]; 
r_tmp_idx[v].first.clear(); r_tmp_idx[v].second.clear(); r_tmp_idx[v].first.shrink_to_fit(); r_tmp_idx[v].second.shrink_to_fit(); }*/ } }; class Bottomup : public construction { public: vector<double> iteration_generated; vector<double> pruning_power; typedef vector<pair<NodeID, EdgeWeight> > Bucket; vector<bool> contracted; vector<Bucket> mtmBucket; vector<EdgeWeight> possibleWitness; int relabelByOrder(CHGraph &chgraph, Ordering &orders) { int totalEdge = 0; vector<NodeID>& inv = orders.inv; vector<NodeID>& rank = orders.rank; for (NodeID v = 0; v < numOfVertices; ++v) rank[inv[v]] = v; vector<vector<CHGraph::CH_Edge> > new_adj(numOfVertices); vector<vector<CHGraph::CH_Edge> > new_r_adj(numOfVertices); //for (int i = 0; i < numOfVertices; ++i) { // for (int j = 0; j < chgraph.adj[i].size(); ++j) { // if (j != chgraph.adj[i].size() - 1) // if (chgraph.adj[i][j].target == chgraph.adj[i][j + 1].target) { // cout << i << " hahaah:" << chgraph.adj[i][j].weight << "," << chgraph.adj[i][j + 1].weight << endl; // } // } //} for (NodeID v = 0; v < numOfVertices; ++v) { for (NodeID i = 0; i < chgraph.adj[v].size(); ++i) new_adj[rank[v]].push_back(CHGraph::CH_Edge(rank[chgraph.adj[v][i].target], 0, chgraph.adj[v][i].weight)); totalEdge += chgraph.adj[v].size(); if (DIRECTED_FLAG == true) { totalEdge += chgraph.r_adj[v].size(); for (NodeID i = 0; i < chgraph.r_adj[v].size(); ++i) new_r_adj[rank[v]].push_back(CHGraph::CH_Edge(rank[chgraph.r_adj[v][i].target], 0, chgraph.r_adj[v][i].weight)); } } chgraph.adj.swap(new_adj); if (DIRECTED_FLAG == true) { chgraph.r_adj.swap(new_r_adj); } for (int i = 0; i < numOfVertices; ++i) { if (DIRECTED_FLAG == true) { sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end()); } } new_adj.clear(); if (DIRECTED_FLAG == true) { new_r_adj.clear(); } return totalEdge; } int witness_search(NodeID v, CHGraph& chgraph, const int hopLimitsParameter, Ordering& orders, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<bool>& hitTarget) { int addShortcuts = 0; vector<CHGraph::CH_Edge>& inEdges = chgraph.adj[v]; vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v]; vector<pair<NodeID, CHGraph::CH_Edge> > possibleShortcuts; // 1-hop witness search if (hopLimitsParameter == 1) { for (int i = 0; i < inEdges.size(); ++i) { //if (inEdges[i].level == V) continue; NodeID inNode = inEdges[i].target; if (inNode >= v) continue; // Skip the nodes that have been already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; for (int j = 0; j < outEdges.size(); ++j) { //if (outEdges[j].level == V) continue; NodeID outNode = outEdges[j].target; if (outNode > v ) continue; // Skip the nodes that have been already contracted. if (outNode >= inNode) continue; // For undirected case, only test each pair once and we also skip the case inNode == outNode EdgeWeight outWeight = outEdges[j].weight; EdgeWeight walkThroughWeight = inWeight + outWeight; // Distance for the path (inNode - v - outNode). bool foundWitness = false; for (int k = 0; k < outEdgesOfInNode.size(); ++k) { NodeID outNeighborOfInNode = outEdgesOfInNode[k].target; // if (outNeighborLevelOfInNode == V) continue; if (outNeighborOfInNode >= v) continue; // Skip the ondes that have been already contracted. if (outNeighborOfInNode == outNode) { EdgeWeight outNeighborWeightOfInNode = outEdgesOfInNode[k].weight; // Distance for the direct path (inNode - outNode). 
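/* ----------------------------------------------------------------------
 * Worked example for this 1-hop witness test (numbers invented): when
 * contracting v with edges inNode -(3)-> v -(4)-> outNode, we get
 * walkThroughWeight = 3 + 4 = 7. If inNode also has a direct edge to
 * outNode of weight 6, then 6 <= 7: the direct edge is a witness and no
 * shortcut is needed. If the direct edge weighs 9 instead (or does not
 * exist), 9 > 7 and (inNode, outNode) is recorded in possibleShortcuts
 * with weight 7.
 * ---------------------------------------------------------------------- */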
if (outNeighborWeightOfInNode <= walkThroughWeight) { foundWitness = true; //walkThroughWeight = outNeighborWeightOfInNode; } break; } } if (foundWitness == false) { possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } } } // 2-hop witness search. if (hopLimitsParameter == 2) { // Init the many-to-many bucket. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. EdgeWeight outWeight = outEdges[j].weight; vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.adj[outNode]; for (int k = 0; k < inEdgesOfOutNode.size(); ++k) { NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target; NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level; EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight; if (inNeighborOfOutNode >= v) continue; // Skip the nodes that have been already contracted. And skip the current contracted node (this is important). mtmBucket[inNeighborOfOutNode].push_back(make_pair(outNode, inNeighborWeightOfOutNode)); } } // Finish the init. for (int i = 0; i < inEdges.size(); ++i) { NodeID inNode = inEdges[i].target; if (inNode >= v) continue; // Skip the nodes that have bee n already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; // One-hop witness search from the bucket of inNode. Bucket& bucketInNode = mtmBucket[inNode]; for (int k = 0; k < bucketInNode.size(); ++k) { NodeID target = bucketInNode[k].first; EdgeWeight targetWeight = bucketInNode[k].second; if (target >= inNode) continue; if (possibleWitness[target] > targetWeight) possibleWitness[target] = targetWeight; } // Two-hop witness search from inNode. // 1-hop forward search from inNode and scan the buckets of the reached nodes to find the length of 2-hop witness. for (int k = 0; k < outEdgesOfInNode.size(); ++k) { NodeID reachNode = outEdgesOfInNode[k].target; if (reachNode >= v) continue;// Skip the nodes that have been already contracted. NodeID reachNodeLevel = outEdgesOfInNode[k].level; EdgeWeight reachNodeWeight = outEdgesOfInNode[k].weight; Bucket& bucketReachNode = mtmBucket[reachNode]; for (int q = 0; q < bucketReachNode.size(); ++q) { NodeID target = bucketReachNode[q].first; if (target >= inNode) continue; EdgeWeight newTargetWeight = bucketReachNode[q].second + reachNodeWeight; if (possibleWitness[target] > newTargetWeight) possibleWitness[target] = newTargetWeight; } } // Scan the outNode of v, to check whether shortcuts are needed for (inNode - v - outNode). for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode > v) { possibleWitness[outNode] = INF_WEIGHT; continue; } if (outNode >= inNode) { possibleWitness[outNode] = INF_WEIGHT; continue; }// undirected, scan each pair only once. EdgeWeight outWeight = outEdges[j].weight; EdgeWeight witnessWeight = possibleWitness[outNode]; possibleWitness[outNode] = INF_WEIGHT; EdgeWeight walkThroughWeight = inWeight + outWeight; if(witnessWeight > walkThroughWeight) possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } // Cleanup the many-to-many bucket. 
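/* ----------------------------------------------------------------------
 * Sketch of the many-to-many bucket trick used above (hypothetical
 * standalone form): for every not-yet-contracted out-neighbor outNode of
 * v, every neighbor x of outNode receives a bucket entry
 * (outNode, w(x, outNode)). Scanning the bucket of inNode itself yields
 * 1-hop witnesses; scanning the buckets of every neighbor x of inNode
 * yields 2-hop witnesses:
 *
 *   // assumed: Bucket = vector<pair<NodeID, EdgeWeight> >, filled as
 *   // above, and x was reached from inNode at cost c
 *   Bucket& b = mtmBucket[x];
 *   for (size_t q = 0; q < b.size(); ++q)
 *       possibleWitness[b[q].first] =
 *           std::min(possibleWitness[b[q].first], c + b[q].second);
 *
 * Afterwards possibleWitness[outNode] holds the best witness length over
 * all paths of at most two hops, and is compared against
 * inWeight + outWeight before a shortcut is generated.
 * ---------------------------------------------------------------------- */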
for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode > v) continue; EdgeWeight outWeight = outEdges[j].weight; vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.adj[outNode]; for (int k = 0; k < inEdgesOfOutNode.size(); ++k) { NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target; NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level; EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight; if (inNeighborOfOutNode >= v) continue; if(!mtmBucket[inNeighborOfOutNode].empty()) mtmBucket[inNeighborOfOutNode].clear(); } } } // Dijkstra Local Search. if (hopLimitsParameter > 2) { // Init the target table. int noOfTarget = 0; for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. hitTarget[outNode] = true; noOfTarget++; } // Loop the incoming node for witness search. for (int i = 0; i < inEdges.size(); ++i) { //if (inEdges[i].level == V) continue; NodeID inNode = inEdges[i].target; int visitedTarget = 0; if (inNode >= v) continue; // Skip the nodes that have been already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; pqueue.update(inNode, 0); visited_set.insert(inNode); distances[inNode] = 0; while (!pqueue.empty()) { NodeID u; EdgeWeight u_d; pqueue.extract_min(u, u_d); vis[u] = true; if (hitTarget[u] == true) visitedTarget++; if (visitedTarget == noOfTarget) break; for (int j = 0; j < chgraph.adj[u].size(); ++j) { NodeID w = chgraph.adj[u][j].target; EdgeWeight w_d = chgraph.adj[u][j].weight + u_d; if (w >= v) continue; // Can not visit contracted nodes. if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; visited_set.insert(w); } } } } // Test witness for all outNode. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; if (outNode >= inNode) continue; // undirected, scan each pair only once. EdgeWeight outWeight = outEdges[j].weight; EdgeWeight walkThroughWeight = inWeight + outWeight; if (distances[outNode] > walkThroughWeight) { possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search. for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) { NodeID cv = *it; vis[cv] = false; distances[cv] = INF_WEIGHT; } while (!pqueue.empty()) { NodeID tmpv; EdgeWeight tmpweight; pqueue.extract_min(tmpv, tmpweight); } visited_set.clear(); } // Clean the target table for the next contracted node. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. hitTarget[outNode] = false; } } // Append the shortcuts. 
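/* ----------------------------------------------------------------------
 * Aside on the local Dijkstra above (hopLimitsParameter > 2): the search
 * is bounded in two ways, it never relaxes vertices with id >= v (those
 * are already contracted) and it stops once every marked target, i.e.
 * every remaining out-neighbor of v, has been popped. A minimal sketch of
 * the same pattern with this file's heap interface:
 *
 *   int remaining = noOfTarget;
 *   while (!pqueue.empty()) {
 *       NodeID u; EdgeWeight du;
 *       pqueue.extract_min(u, du);
 *       if (hitTarget[u] && --remaining == 0) break;  // all targets settled
 *       for (size_t j = 0; j < chgraph.adj[u].size(); ++j) {
 *           NodeID w = chgraph.adj[u][j].target;
 *           if (w >= v) continue;                     // already contracted
 *           EdgeWeight nd = du + chgraph.adj[u][j].weight;
 *           if (nd < distances[w]) { distances[w] = nd; pqueue.update(w, nd); }
 *       }
 *   }
 *
 * Only the entries recorded in visited_set are reset afterwards, which is
 * what lets the O(numOfVertices) arrays be reused across all contractions
 * without a full per-node reinitialization.
 * ---------------------------------------------------------------------- */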
for (int i = 0; i < possibleShortcuts.size(); ++i) { NodeID fromNode = possibleShortcuts[i].first; NodeID toNode = possibleShortcuts[i].second.target; NodeID level = possibleShortcuts[i].second.level; EdgeWeight weight = possibleShortcuts[i].second.weight; addShortcuts++; addShortcuts++; int fromAdjSize = chgraph.adj[fromNode].size(); bool skipfrom = false; for (int j = fromAdjSize - 1; j + 1 > 0; --j) { if (chgraph.adj[fromNode][j].target == toNode) { if (weight > chgraph.adj[fromNode][j].weight) break; chgraph.adj[fromNode][j].weight = weight; chgraph.adj[fromNode][j].level = level; skipfrom = true; addShortcuts--; break; } } if (!skipfrom) { chgraph.adj[fromNode].push_back(CHGraph::CH_Edge(toNode, level, weight)); } int toAdjSize = chgraph.adj[toNode].size(); bool skipto = false; for (int j = toAdjSize - 1; j + 1 > 0; --j) { if (chgraph.adj[toNode][j].target == fromNode) { if (weight > chgraph.adj[toNode][j].weight) break; chgraph.adj[toNode][j].weight = weight; chgraph.adj[toNode][j].level = level; skipto = true; addShortcuts--; break; } } if (!skipto) chgraph.adj[toNode].push_back(CHGraph::CH_Edge(fromNode, level, weight)); } addShortcuts -= chgraph.adj[v].size(); // Two times because it will appear in others' adj. possibleShortcuts.clear(); return addShortcuts; } int witness_search_directed(NodeID v, CHGraph& chgraph, const int hopLimitsParameter, Ordering& orders, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<bool>& hitTarget) { int addShortcuts = 0; vector<CHGraph::CH_Edge>& inEdges = chgraph.r_adj[v]; vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v]; vector<pair<NodeID, CHGraph::CH_Edge> > possibleShortcuts; // 1-hop witness search if (hopLimitsParameter == 1) { for (int i = 0; i < inEdges.size(); ++i) { //if (inEdges[i].level == V) continue; NodeID inNode = inEdges[i].target; if (inNode >= v) continue; // Skip the nodes that have been already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; for (int j = 0; j < outEdges.size(); ++j) { //if (outEdges[j].level == V) continue; NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. if (outNode == inNode) continue; EdgeWeight outWeight = outEdges[j].weight; EdgeWeight walkThroughWeight = inWeight + outWeight; // Distance for the path (inNode - v - outNode). bool foundWitness = false; for (int k = 0; k < outEdgesOfInNode.size(); ++k) { NodeID outNeighborOfInNode = outEdgesOfInNode[k].target; // if (outNeighborLevelOfInNode == V) continue; if (outNeighborOfInNode >= v) continue; // Skip the ondes that have been already contracted. if (outNeighborOfInNode == outNode) { EdgeWeight outNeighborWeightOfInNode = outEdgesOfInNode[k].weight; // Distance for the direct path (inNode - outNode). if (outNeighborWeightOfInNode <= walkThroughWeight) { foundWitness = true; //walkThroughWeight = outNeighborWeightOfInNode; } break; } } if (foundWitness == false) { possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } } } // 2-hop witness search. if (hopLimitsParameter == 2) { // Init the many-to-many bucket. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. 
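/* ----------------------------------------------------------------------
 * The shortcut-appending loop above is an update-or-insert: a shortcut
 * (from, to, w) creates a new edge only if no from-to edge exists yet;
 * otherwise the existing edge keeps the smaller weight and addShortcuts
 * is decremented so the counter reflects edges actually added. A compact
 * sketch of the same idea (slightly simplified: it also treats an
 * existing strictly better edge as handled instead of appending):
 *
 *   bool updated = false;
 *   for (size_t j = 0; j < chgraph.adj[from].size(); ++j)
 *       if (chgraph.adj[from][j].target == to) {
 *           if (w < chgraph.adj[from][j].weight) {
 *               chgraph.adj[from][j].weight = w;
 *               chgraph.adj[from][j].level = lvl;
 *           }
 *           updated = true;
 *           break;
 *       }
 *   if (!updated) chgraph.adj[from].push_back(CHGraph::CH_Edge(to, lvl, w));
 *
 * The original scans backwards, presumably because edges added by recent
 * contractions cluster at the tail of the adjacency list.
 * ---------------------------------------------------------------------- */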
EdgeWeight outWeight = outEdges[j].weight; vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.r_adj[outNode]; for (int k = 0; k < inEdgesOfOutNode.size(); ++k) { NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target; NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level; EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight; if (inNeighborOfOutNode >= v) continue; // Skip the nodes that have been already contracted. And skip the current contracted node (this is important). mtmBucket[inNeighborOfOutNode].push_back(make_pair(outNode, inNeighborWeightOfOutNode)); } } // Finish the init. for (int i = 0; i < inEdges.size(); ++i) { NodeID inNode = inEdges[i].target; if (inNode >= v) continue; // Skip the nodes that have bee n already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; // One-hop witness search from the bucket of inNode. Bucket& bucketInNode = mtmBucket[inNode]; for (int k = 0; k < bucketInNode.size(); ++k) { NodeID target = bucketInNode[k].first; EdgeWeight targetWeight = bucketInNode[k].second; //if (target >= inNode) continue; if (possibleWitness[target] > targetWeight) possibleWitness[target] = targetWeight; } // Two-hop witness search from inNode. // 1-hop forward search from inNode and scan the buckets of the reached nodes to find the length of 2-hop witness. for (int k = 0; k < outEdgesOfInNode.size(); ++k) { NodeID reachNode = outEdgesOfInNode[k].target; if (reachNode >= v) continue;// Skip the nodes that have been already contracted. NodeID reachNodeLevel = outEdgesOfInNode[k].level; EdgeWeight reachNodeWeight = outEdgesOfInNode[k].weight; Bucket& bucketReachNode = mtmBucket[reachNode]; for (int q = 0; q < bucketReachNode.size(); ++q) { NodeID target = bucketReachNode[q].first; // if (target >= inNode) continue; EdgeWeight newTargetWeight = bucketReachNode[q].second + reachNodeWeight; if (possibleWitness[target] > newTargetWeight) possibleWitness[target] = newTargetWeight; } } // Scan the outNode of v, to check whether shortcuts are needed for (inNode - v - outNode). for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode > v) { possibleWitness[outNode] = INF_WEIGHT; continue; } if (outNode == inNode) { possibleWitness[outNode] = INF_WEIGHT; continue; } EdgeWeight outWeight = outEdges[j].weight; EdgeWeight witnessWeight = possibleWitness[outNode]; possibleWitness[outNode] = INF_WEIGHT; EdgeWeight walkThroughWeight = inWeight + outWeight; if (witnessWeight > walkThroughWeight) possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } // Cleanup the many-to-many bucket. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode > v) continue; EdgeWeight outWeight = outEdges[j].weight; vector<CHGraph::CH_Edge>& inEdgesOfOutNode = chgraph.r_adj[outNode]; for (int k = 0; k < inEdgesOfOutNode.size(); ++k) { NodeID inNeighborOfOutNode = inEdgesOfOutNode[k].target; NodeID inNeighborLevelOfOutNode = inEdgesOfOutNode[k].level; EdgeWeight inNeighborWeightOfOutNode = inEdgesOfOutNode[k].weight; if (inNeighborOfOutNode >= v) continue; if (!mtmBucket[inNeighborOfOutNode].empty()) mtmBucket[inNeighborOfOutNode].clear(); } } } // Dijkstra Local Search. if (hopLimitsParameter > 2) { // Init the target table. int noOfTarget = 0; for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. 
hitTarget[outNode] = true; noOfTarget++; } // Loop the incoming node for witness search. for (int i = 0; i < inEdges.size(); ++i) { //if (inEdges[i].level == V) continue; NodeID inNode = inEdges[i].target; int visitedTarget = 0; if (inNode >= v) continue; // Skip the nodes that have been already contracted. EdgeWeight inWeight = inEdges[i].weight; vector<CHGraph::CH_Edge>& outEdgesOfInNode = chgraph.adj[inNode]; pqueue.update(inNode, 0); visited_set.insert(inNode); distances[inNode] = 0; while (!pqueue.empty()) { NodeID u; EdgeWeight u_d; pqueue.extract_min(u, u_d); vis[u] = true; if (hitTarget[u] == true) visitedTarget++; if (visitedTarget == noOfTarget) break; for (int j = 0; j < chgraph.adj[u].size(); ++j) { NodeID w = chgraph.adj[u][j].target; EdgeWeight w_d = chgraph.adj[u][j].weight + u_d; if (w >= v) continue; // Can not visit contracted nodes. if (!vis[w]) { if (distances[w] > w_d) { pqueue.update(w, w_d); distances[w] = w_d; visited_set.insert(w); } } } } // Test witness for all outNode. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; if (outNode == inNode) continue; EdgeWeight outWeight = outEdges[j].weight; EdgeWeight walkThroughWeight = inWeight + outWeight; if (distances[outNode] > walkThroughWeight) { possibleShortcuts.push_back(make_pair(inNode, CHGraph::CH_Edge(outNode, v, walkThroughWeight))); } } // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search. for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) { NodeID cv = *it; vis[cv] = false; distances[cv] = INF_WEIGHT; } while (!pqueue.empty()) { NodeID tmpv; EdgeWeight tmpweight; pqueue.extract_min(tmpv, tmpweight); } visited_set.clear(); } // Clean the target table for the next contracted node. for (int j = 0; j < outEdges.size(); ++j) { NodeID outNode = outEdges[j].target; if (outNode >= v) continue; // Skip the nodes that have been already contracted. hitTarget[outNode] = false; } } // Append the shortcuts. for (int i = 0; i < possibleShortcuts.size(); ++i) { addShortcuts += 2; NodeID fromNode = possibleShortcuts[i].first; NodeID toNode = possibleShortcuts[i].second.target; NodeID level = possibleShortcuts[i].second.level; EdgeWeight weight = possibleShortcuts[i].second.weight; int fromAdjSize = chgraph.adj[fromNode].size(); bool skipfrom = false; for (int j = fromAdjSize - 1; j + 1 > 0; --j) { if (chgraph.adj[fromNode][j].target == toNode) { if (weight > chgraph.adj[fromNode][j].weight) break; chgraph.adj[fromNode][j].weight = weight; chgraph.adj[fromNode][j].level = level; skipfrom = true; addShortcuts--; break; } } if (!skipfrom) { chgraph.adj[fromNode].push_back(CHGraph::CH_Edge(toNode, level, weight)); } int toAdjSize = chgraph.r_adj[toNode].size(); bool skipto = false; for (int j = toAdjSize - 1; j + 1 > 0; --j) { if (chgraph.r_adj[toNode][j].target == fromNode) { if (weight > chgraph.r_adj[toNode][j].weight) break; chgraph.r_adj[toNode][j].weight = weight; chgraph.r_adj[toNode][j].level = level; skipto = true; addShortcuts--; break; } } if (!skipto) chgraph.r_adj[toNode].push_back(CHGraph::CH_Edge(fromNode, level, weight)); } addShortcuts -= 2 * chgraph.adj[v].size(); // 2 times because these out (in) edges will appear in others' in (out) edges. 
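/* ----------------------------------------------------------------------
 * Bookkeeping example for the subtraction above and the one below: every
 * shortcut is counted twice (it appears in one vertex's out-edges and in
 * another's in-edges), and contracting v logically removes v's own
 * incident edges, each of which is likewise stored twice. E.g. if v has
 * 3 outgoing and 2 incoming edges and the witness search produced 4 new
 * shortcuts, the reported net change is 4*2 - 3*2 - 2*2 = -2 edges.
 * ---------------------------------------------------------------------- */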
		addShortcuts -= 2 * chgraph.r_adj[v].size();
		possibleShortcuts.clear();
		return addShortcuts;
	}

	int build_shortcuts_dij(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<bool>& waitForPop, vector<vector<CHGraph::CH_Edge> >& shortcuts) {
		// On entry, all distances[] are INF_WEIGHT, all vis[] are false and all max_parents[] are numOfVertices.
		if (source == 0) return 0; // No shortcuts for the most important vertex.
		int uncover_count = source;
		pqueue.update(source, 0);
		distances[source] = 0;
		//isNeighbor[source] = true;
		visited_set.insert(source);
		int in_que_cover_wait_for_pop = -1;
		int in_que_cover = 0;
		int in_que = 1;
		while (!pqueue.empty()) {
			NodeID v;
			EdgeWeight v_d;
			pqueue.extract_min(v, v_d);
			in_que--;
			vis[v] = true;
			if (max_parents[v] != numOfVertices) {
				// If v is the first vertex with a higher rank than source along its shortest path, add a shortcut.
				if (max_parents[v] == v && isNeighbor[v] == false) {
					shortcuts[source].push_back(CHGraph::CH_Edge(v, source, v_d));
					in_que_cover_wait_for_pop--;
				}
				else if (max_parents[v] == v && isNeighbor[v] == true) {
					in_que_cover_wait_for_pop--;
				}
				if (v < source)
					uncover_count--;
				if (uncover_count == 0) { // All vertices with a higher rank than source have been visited.
					break;
				}
				in_que_cover--;
			}
			for (int i = 0; i < adj[v].size(); ++i) {
				NodeID w = adj[v][i].target;
				EdgeWeight w_d = adj[v][i].weight + v_d;
				if (!vis[w]) {
					// w is inserted into the queue for the first time.
					if (distances[w] == INF_WEIGHT && w_d < distances[w]) {
						in_que++;
						visited_set.insert(w);
					}
					if (distances[w] > w_d) {
						distances[w] = w_d;
						pqueue.update(w, w_d);
						if (v == source)
							isNeighbor[w] = true;
						int uncover_flag = false;
						if (max_parents[w] == numOfVertices)
							uncover_flag = true;
						// The shorter path comes from u-x-v-w rather than u(v)-w.
						if (isNeighbor[w] == true && v != source) {
							isNeighbor[w] = false;
						}
						// w has not been covered yet.
						if (max_parents[w] == numOfVertices) {
							// w is the first vertex with a higher rank than source.
							if (w < source && max_parents[v] == numOfVertices) {
								// Newly covered vertex in queue.
								if (waitForPop[w] == false) {
									if (in_que_cover_wait_for_pop == -1)
										in_que_cover_wait_for_pop = 0;
									in_que_cover_wait_for_pop++;
									waitForPop[w] = true;
								}
								max_parents[w] = w;
							}
							// The path w-source has already been hit by the previous max_parents[v].
							if (max_parents[v] != numOfVertices) {
								if (waitForPop[w] == true) {
									if (in_que_cover_wait_for_pop == -1)
										in_que_cover_wait_for_pop = 0;
									else
										in_que_cover_wait_for_pop--;
									waitForPop[w] = false;
								}
								max_parents[w] = max_parents[v];
							}
							// w is covered for the first time.
							if (uncover_flag == true && max_parents[w] != numOfVertices)
								in_que_cover++;
						}
						else { // max_parents[w] != numOfVertices: w has been covered previously.
							// v has not been covered.
							if (max_parents[v] == numOfVertices) {
								if (w < source) { // w is the first higher-ranked vertex on this shortest path.
									if (waitForPop[w] == false) {
										if (in_que_cover_wait_for_pop == -1)
											in_que_cover_wait_for_pop = 0;
										in_que_cover_wait_for_pop++;
										waitForPop[w] = true;
									}
									max_parents[w] = w;
								}
								else { // The new shortest path to w is not covered; remove the stale covering information.
									max_parents[w] = numOfVertices;
									in_que_cover--;
								}
							}
							// w is now covered through v; if w was recorded as the first higher-ranked vertex, remove that record.
							if (max_parents[v] != numOfVertices) {
								if (max_parents[w] == w) {
									if (waitForPop[w] == true) {
										if (in_que_cover_wait_for_pop == -1)
											in_que_cover_wait_for_pop = 0;
										else
											in_que_cover_wait_for_pop--;
										waitForPop[w] = false;
									}
									max_parents[w] = max_parents[v];
								}
							}
						}
					}
				}
			}
			// All elements left in the queue are covered and no covered shortcut candidate waits to be popped.
			if (in_que == in_que_cover && v != source && in_que_cover_wait_for_pop != -1 && in_que_cover_wait_for_pop == 0) {
				break;
			}
		}
		// Clean up the Dijkstra structures; otherwise stale values would corrupt the next search.
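/* ----------------------------------------------------------------------
 * Invariant sketch for the covering search above: when (v, source, v_d)
 * is pushed into shortcuts[source], then v_d equals dist(source, v) in
 * the current graph (Dijkstra pop order), v outranks source (v < source
 * after relabeling, enforced because max_parents[w] = w is only set for
 * w < source), and no other higher-ranked vertex lies on that shortest
 * path (max_parents[v] == v). A brute-force check, assuming a plain
 * dijkstra_dist(adj, s) helper that ignores ranks:
 *
 *   vector<EdgeWeight> d = dijkstra_dist(adj, source);  // hypothetical
 *   for (size_t i = 0; i < shortcuts[source].size(); ++i) {
 *       const CHGraph::CH_Edge& sc = shortcuts[source][i];
 *       assert(sc.target < source);
 *       assert(d[sc.target] == sc.weight);
 *   }
 *
 * isNeighbor[] additionally filters out pairs already connected by an
 * original edge, since those need no shortcut.
 * ---------------------------------------------------------------------- */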
for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) { NodeID cv = *it; vis[cv] = false; distances[cv] = INF_WEIGHT; max_parents[cv] = numOfVertices; isNeighbor[cv] = false; pqueue.clear(cv); waitForPop[cv] = false; } /* while (!pqueue.empty()) { NodeID tmpv; EdgeWeight tmpweight; pqueue.extract_min(tmpv, tmpweight); }*/ pqueue.clear_n(); visited_set.clear(); return 0; } //int build_shortcuts_dij(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, benchmark::heap<2, EdgeWeight, NodeID>& pqueue, unordered_set<NodeID>& visited_set, vector<EdgeWeight>& distances, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts) { // int uncover_count = source; // pqueue.update(source, 0); // distances[source] = 0; // //isNeighbor[source] = true; // visited_set.insert(source); // while (!pqueue.empty()) { // NodeID v; // EdgeWeight v_d; // pqueue.extract_min(v, v_d); // vis[v] = true; // if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here. // if (max_parents[v] == v && isNeighbor[v] == false) // shortcuts[source].push_back(CHGraph::CH_Edge(v, source, v_d)); // if (v < source) // uncover_count--; // if (uncover_count == 0) // All vertices higher rank than source have been covered. // break; // continue; // } // for (int i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i].target; // EdgeWeight w_d = adj[v][i].weight + v_d; // if (!vis[w]) { // if (distances[w] > w_d) { // distances[w] = w_d; // pqueue.update(w, w_d); // visited_set.insert(w); // if (v == source) // isNeighbor[w] = true; // // w is the first vertex has higher rank than source. // if (w < source && max_parents[v] == numOfVertices) // max_parents[w] = w; // // w-source has already been hit by previous max_parents[v]. // if (max_parents[v] != numOfVertices) // max_parents[w] = max_parents[v]; // // The shorter path comes from u-x-v-w rather than u(v)-w. // if (isNeighbor[w] == true && v != source) { // isNeighbor[w] = false; // } // /* if (max_parents[w] > w) // max_parents[w] = w; // if(max_parents[w] > max_parents[v]) // max_parents[w] = max_parents[v];*/ // } // } // } // } // // Clean up the dijkstra structures otherwise the trash will manipulate the next inNode's dijkstra search. // for (unordered_set<NodeID>::iterator it = visited_set.begin(); it != visited_set.end(); ++it) { // NodeID cv = *it; // vis[cv] = false; // distances[cv] = INF_WEIGHT; // max_parents[cv] = numOfVertices; // isNeighbor[cv] = false; // pqueue.clear(cv); // } // /* while (!pqueue.empty()) { // NodeID tmpv; // EdgeWeight tmpweight; // pqueue.extract_min(tmpv, tmpweight); // }*/ // pqueue.clear_n(); // visited_set.clear(); // return 0; //} //int build_shortcuts_bfs(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts, vector<NodeID>& que) { // int uncover_count = source; // //vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); // // NodeID que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = source; // vis[source] = true; // que_t1 = que_h; // for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { // for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { // NodeID v = que[que_i]; // if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here. 
// // if (max_parents[v] == v && isNeighbor[v] == false) // shortcuts[source].push_back(CHGraph::CH_Edge(v, source, d)); // // if (v < source) // uncover_count--; // if (uncover_count == 0) { // All vertices higher rank than source have been covered. // break; // } // continue; // } // for (size_t i = 0; i < adj[v].size(); ++i) { // NodeID w = adj[v][i].target; // if (!vis[w]) { // vis[w] = true; // if (v == source) // isNeighbor[w] = true; // // w is the first vertex has higher rank than source. // if (w < source )//&& max_parents[v] == numOfVertices) // max_parents[w] = w; // //// w-source has already been hit by previous max_parents[v]. // //if (max_parents[v] != numOfVertices) { // // max_parents[w] = max_parents[v]; // // continue; // //} // // //// The shorter path comes from u-x-v-w rather than u(v)-w. *We wont have this case in unweigted graphs. // //if (isNeighbor[w] == true && v != source) { // // isNeighbor[w] = false; // //} // que[que_h++] = w; // } // } // pruned: // {} // } // que_t0 = que_t1; // que_t1 = que_h; // } // for (size_t i = 0; i < que_h; ++i) { // vis[que[i]] = false; // max_parents[que[i]] = numOfVertices; // isNeighbor[que[i]] = false; // } //} int build_shortcuts_bfs(NodeID source, vector<vector<CHGraph::CH_Edge> >& adj, vector<bool>& vis, vector<NodeID>& max_parents, vector<bool>& isNeighbor, vector<vector<CHGraph::CH_Edge> >& shortcuts, vector<NodeID>& que) { int uncover_count = source; //vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); NodeID que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = source; vis[source] = true; que_t1 = que_h; int in_que = 1; int in_que_cover = 0; int in_que_wait_for_pop = -1; for (EdgeWeight d = 0; que_t0 < que_h; d = d + 1) { for (NodeID que_i = que_t0; que_i < que_t1; ++que_i) { NodeID v = que[que_i]; in_que--; if (max_parents[v] != numOfVertices) { // Either one of the ancestors of v or v has higher rank than source, we can stop the search from here. if (max_parents[v] == v && isNeighbor[v] == false) { shortcuts[source].push_back(CHGraph::CH_Edge(v, source, d)); in_que_wait_for_pop--; } if(max_parents[v] == v && isNeighbor[v] == true) in_que_wait_for_pop--; if (v < source) uncover_count--; if (uncover_count == 0) { // All vertices higher rank than source have been covered. goto jumpout; } in_que_cover--; } for (size_t i = 0; i < adj[v].size(); ++i) { NodeID w = adj[v][i].target; if (!vis[w]) { vis[w] = true; in_que++; if (v == source) isNeighbor[w] = true; // w is the first vertex has higher rank than source. if (w < source && max_parents[v] == numOfVertices) {//&& max_parents[v] == numOfVertices) max_parents[w] = w; if (in_que_wait_for_pop == -1) in_que_wait_for_pop = 0; in_que_wait_for_pop++; in_que_cover++; } if (max_parents[v] != numOfVertices) { in_que_cover++; max_parents[w] = max_parents[v]; } //// w-source has already been hit by previous max_parents[v]. //if (max_parents[v] != numOfVertices) { // max_parents[w] = max_parents[v]; // continue; //} //// The shorter path comes from u-x-v-w rather than u(v)-w. *We wont have this case in unweigted graphs. 
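/* ----------------------------------------------------------------------
 * Unweighted counterpart: in this BFS variant the level d plays the role
 * of the tentative distance, so every vertex settles the first time it
 * is seen and no decrease-key is needed; a shortcut discovered while
 * scanning level d therefore gets weight exactly d. For example, on the
 * unweighted path source - x - y with y < source (y outranks source), y
 * is first reached at level d = 2 and the recorded shortcut (y, source)
 * has weight 2 = dist(source, y).
 * ---------------------------------------------------------------------- */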
					//if (isNeighbor[w] == true && v != source) {
					//	isNeighbor[w] = false;
					//}
					que[que_h++] = w;
				}
			}
			if (in_que == in_que_cover && in_que_wait_for_pop == 0) {
				goto jumpout;
			}
		}
		que_t0 = que_t1;
		que_t1 = que_h;
	}
jumpout: {}
	for (size_t i = 0; i < que_h; ++i) {
		vis[que[i]] = false;
		max_parents[que[i]] = numOfVertices;
		isNeighbor[que[i]] = false;
	}
	return 0;
}

	void labeling(CHGraph &chgraph, Ordering &orders) {
		for (int i = 0; i < numOfVertices; ++i) {
			if (chgraph.adj[i].size() != 0)
				sort(chgraph.adj[i].begin(), chgraph.adj[i].end());
			if (DIRECTED_FLAG == true) {
				if (chgraph.r_adj[i].size() != 0)
					sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end());
			}
		}
		vector<index_t>& index_ = labels.index_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
		set<NodeID> appendCandidate;
		// Process every node in rank order.
		for (NodeID v = 0; v < numOfVertices; ++v) {
			vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v];
			pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
			if (DIRECTED_FLAG == false) {
				// Scan all neighbors u of v.
				for (int i = 0; i < adj_v.size(); ++i) {
					NodeID u = adj_v[i].target;
					NodeID level = adj_v[i].level;
					if (u >= v) continue;
					EdgeWeight weight = adj_v[i].weight;
					// Check all existing labels w in the labels of u.
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_u = tmp_idx[u];
					for (int j = 0; j < tmp_idx_u.first.size(); ++j) {
						NodeID w = tmp_idx_u.first[j];
						EdgeWeight vuw_distance = tmp_idx_u.second[j] + weight; // Distance from v via u to w.
						if (dst_r[w] > vuw_distance) {
							if (dst_r[w] == INF_WEIGHT)
								appendCandidate.insert(w);
							dst_r[w] = vuw_distance;
						}
					}
				}
				// Post-prune the candidates by a query test, starting from the highest-ranked candidate. It takes O(|M|^2) time, where |M| is the average label size.
				for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
					NodeID w = *it;
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_w = tmp_idx[w];
					for (int k = 0; k < tmp_idx_w.first.size(); ++k) {
						NodeID labelOfW = tmp_idx_w.first[k];
						EdgeWeight distanceToIt = tmp_idx_w.second[k] + dst_r[labelOfW];
						if (dst_r[w] >= distanceToIt) {
							if (labelOfW != w)
								dst_r[w] = INF_WEIGHT; // Prune: this path is already hit by the higher-ranked vertex labelOfW.
break; } } } for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) { NodeID w = *it; if (dst_r[w] == INF_WEIGHT) continue; tmp_idx_v.first.push_back(w); tmp_idx_v.second.push_back(dst_r[w]); dst_r[w] = INF_WEIGHT; } appendCandidate.clear(); tmp_idx_v.first.push_back(v); tmp_idx_v.second.push_back(0); } } if (DIRECTED_FLAG == false) { for (size_t v = 0; v < numOfVertices; ++v) { tmp_idx[v].first.push_back(numOfVertices); tmp_idx[v].second.push_back(INF_WEIGHT); NodeID k = tmp_idx[v].first.size(); index_[inv[v]].spt_v.resize(k); index_[inv[v]].spt_d.resize(k); for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i]; for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i]; // tmp_idx[v].first.clear(); vector<NodeID>().swap(tmp_idx[v].first); vector<EdgeWeight>().swap(tmp_idx[v].second); // tmp_idx[v].second.clear(); } } } void td_labeling(CHGraph &chgraph, Ordering &orders) { for (int i = 0; i < numOfVertices; ++i) { if (chgraph.adj[i].size() != 0) sort(chgraph.adj[i].rbegin(), chgraph.adj[i].rend()); if (chgraph.r_adj[i].size() != 0) sort(chgraph.r_adj[i].rbegin(), chgraph.r_adj[i].rend()); } vector<index_t>& index_ = labels.index_; vector<NodeID> &inv = orders.inv; vector<NodeID> &rank = orders.rank; /* vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));*/ vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>())); //vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT); set<NodeID> appendCandidate; // vector<CHGraph::CH_Edge>& inEdges = chgraph.r_adj[v]; //vector<CHGraph::CH_Edge>& outEdges = chgraph.adj[v]; //vector<index_t>& index_ = labels.index_; //vector<NodeID> &inv = orders.inv; //vector<NodeID> &rank = orders.rank; // vector<vector<NodeID> > &adj = wgraph.adj; // vector<vector<EdgeWeight> > &adj_weight = wgraph.adj_weight; //vector<EdgeID>& vertices = wgraph.vertices; //vector<NodeEdgeWeightPair>& edges = wgraph.edges; vector<bool> usd(numOfVertices, false); vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(1, numOfVertices), vector<EdgeWeight>(1, INF_WEIGHT))); vector<bool> vis(numOfVertices); vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT); queue<NodeID> visited_que; vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); long pop = 0; double hsize = 0; for (size_t r = 0; r < numOfVertices; ++r) { if (usd[r]) continue; const pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_r = tmp_idx[r]; for (size_t i = 0; i < tmp_idx_r.first.size(); ++i) { dst_r[tmp_idx_r.first[i]] = tmp_idx_r.second[i]; } pqueue.update(r, 0); //vis[r] = true; long max_heap_size = 0; long heap_size = 1; while (!pqueue.empty()) { pop++; heap_size--; NodeID v; EdgeWeight v_d; pqueue.extract_min(v, v_d); pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v]; vis[v] = true; visited_que.push(v); vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v]; if (usd[v]) continue; for (size_t i = 0; i < tmp_idx_v.first.size(); ++i) { NodeID w = tmp_idx_v.first[i]; EdgeWeight td = tmp_idx_v.second[i] + dst_r[w]; if (td <= v_d) { // pruning_power[w]++; goto pruned; } } // Traverse tmp_idx_v.first.back() = r; tmp_idx_v.second.back() = v_d; tmp_idx_v.first.push_back(numOfVertices); tmp_idx_v.second.push_back(INF_WEIGHT); 
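/* ----------------------------------------------------------------------
 * Note on the neighbor scan below: td_labeling sorts every adjacency
 * list in descending target order (rbegin/rend above), and after
 * relabelByOrder smaller ids mean higher ranks. The scan may therefore
 * stop at the first neighbor w with w < v, because all remaining entries
 * also outrank v and this top-down search only descends to lower-ranked
 * neighbors. E.g. with targets sorted as (9, 7, 4, 2) and v = 5, the
 * loop relaxes 9 and 7, then breaks at 4.
 * ---------------------------------------------------------------------- */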
				iteration_generated[r]++;
				for (int i = 0; i < adj_v.size(); ++i) {
					NodeID w = adj_v[i].target;
					EdgeWeight w_d = adj_v[i].weight + v_d;
					if (w < v) break; // Only neighbors with lower ranks are visited; adj_v is sorted by descending id.
					if (!vis[w]) {
						if (distances[w] == INF_WEIGHT) {
							heap_size++;
							if (max_heap_size < heap_size)
								max_heap_size = heap_size;
						}
						if (distances[w] > w_d) {
							pqueue.update(w, w_d);
							distances[w] = w_d;
						}
					}
				}
			pruned:
				{}
			}
			hsize = hsize + max_heap_size;
			while (!visited_que.empty()) {
				NodeID vis_v = visited_que.front();
				visited_que.pop();
				vis[vis_v] = false;
				distances[vis_v] = INF_WEIGHT;
				pqueue.clear(vis_v);
			}
			pqueue.clear_n();
			for (size_t i = 0; i < tmp_idx_r.first.size(); ++i)
				dst_r[tmp_idx_r.first[i]] = INF_WEIGHT;
			usd[r] = true;
		}
		cout << "total pop:" << pop << endl;
		cout << "average max heap size " << (double)hsize / (double)numOfVertices << endl;
		for (size_t v = 0; v < numOfVertices; ++v) {
			NodeID k = tmp_idx[v].first.size();
			index_[inv[v]].spt_v.resize(k);
			index_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
		}
	}

	void labeling_directed(CHGraph &chgraph, Ordering &orders) {
		for (int i = 0; i < numOfVertices; ++i) {
			sort(chgraph.adj[i].begin(), chgraph.adj[i].end());
			sort(chgraph.r_adj[i].begin(), chgraph.r_adj[i].end());
		}
		vector<index_t>& index_ = dlabels.index_;
		vector<index_t>& bindex_ = dlabels.bindex_;
		vector<NodeID> &inv = orders.inv;
		vector<NodeID> &rank = orders.rank;
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
		vector<pair<vector<NodeID>, vector<EdgeWeight> > > r_tmp_idx(numOfVertices, make_pair(vector<NodeID>(), vector<EdgeWeight>()));
		vector<EdgeWeight> dst_r(numOfVertices + 1, INF_WEIGHT);
		vector<EdgeWeight> r_dst_r(numOfVertices + 1, INF_WEIGHT);
		set<NodeID> appendCandidate;
		// The main process:
		// Forward search: fetch the necessary out-labels via the forward adjacency (all outgoing labels must pass through out-neighbors).
		// Backward search: fetch the necessary in-labels via the backward adjacency (all incoming labels must pass through in-neighbors).
		// Process every node in rank order.
		for (NodeID v = 0; v < numOfVertices; ++v) {
			// Forward search.
			{
				vector<CHGraph::CH_Edge>& adj_v = chgraph.adj[v];
				pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_v = tmp_idx[v];
				// Scan all neighbors u of v.
				for (int i = 0; i < adj_v.size(); ++i) {
					NodeID u = adj_v[i].target;
					NodeID level = adj_v[i].level;
					if (u >= v) continue;
					EdgeWeight weight = adj_v[i].weight;
					// Check all existing labels w in the labels of u.
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_u = tmp_idx[u];
					for (int j = 0; j < tmp_idx_u.first.size(); ++j) {
						NodeID w = tmp_idx_u.first[j];
						EdgeWeight vuw_distance = tmp_idx_u.second[j] + weight; // Distance from v via u to w.
						if (dst_r[w] > vuw_distance) {
							if (dst_r[w] == INF_WEIGHT)
								appendCandidate.insert(w);
							dst_r[w] = vuw_distance;
						}
					}
				}
				// Post-prune the candidates by a query test, starting from the highest-ranked candidate. It takes O(|M|^2) time, where |M| is the average label size.
				for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
					NodeID w = *it;
					pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_w = r_tmp_idx[w];
					for (int k = 0; k < r_tmp_idx_w.first.size(); ++k) {
						NodeID labelOfW = r_tmp_idx_w.first[k];
						EdgeWeight distanceToIt = r_tmp_idx_w.second[k] + dst_r[labelOfW];
						if (dst_r[w] >= distanceToIt) {
							if (labelOfW != w)
								dst_r[w] = INF_WEIGHT; // Prune: this path is already hit by the higher-ranked vertex labelOfW.
							break;
						}
					}
				}
				for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
					NodeID w = *it;
					if (dst_r[w] == INF_WEIGHT) continue;
					tmp_idx_v.first.push_back(w);
					tmp_idx_v.second.push_back(dst_r[w]);
					dst_r[w] = INF_WEIGHT;
				}
				appendCandidate.clear();
				tmp_idx_v.first.push_back(v);
				tmp_idx_v.second.push_back(0);
			}
			// Backward search.
			{
				vector<CHGraph::CH_Edge>& r_adj_v = chgraph.r_adj[v];
				pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_v = r_tmp_idx[v];
				// Scan all in-neighbors u of v.
				for (int i = 0; i < r_adj_v.size(); ++i) {
					NodeID u = r_adj_v[i].target;
					NodeID level = r_adj_v[i].level;
					if (u >= v) continue;
					EdgeWeight weight = r_adj_v[i].weight;
					// Check all existing labels w in the labels of u.
					pair<vector<NodeID>, vector<EdgeWeight> > &r_tmp_idx_u = r_tmp_idx[u];
					for (int j = 0; j < r_tmp_idx_u.first.size(); ++j) {
						NodeID w = r_tmp_idx_u.first[j];
						EdgeWeight wuv_distance = r_tmp_idx_u.second[j] + weight; // Distance from w via u to v.
						if (r_dst_r[w] > wuv_distance) {
							if (r_dst_r[w] == INF_WEIGHT)
								appendCandidate.insert(w);
							r_dst_r[w] = wuv_distance;
						}
					}
				}
				// Post-prune the candidates by a query test, starting from the highest-ranked candidate. It takes O(|M|^2) time, where |M| is the average label size.
				for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
					NodeID w = *it;
					pair<vector<NodeID>, vector<EdgeWeight> > &tmp_idx_w = tmp_idx[w];
					for (int k = 0; k < tmp_idx_w.first.size(); ++k) {
						NodeID labelOfW = tmp_idx_w.first[k];
						EdgeWeight distanceToIt = tmp_idx_w.second[k] + r_dst_r[labelOfW];
						if (r_dst_r[w] >= distanceToIt) {
							if (labelOfW != w)
								r_dst_r[w] = INF_WEIGHT; // Prune: this path is already hit by the higher-ranked vertex labelOfW.
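/* ----------------------------------------------------------------------
 * Directed query duality behind the two pruning loops here: with forward
 * labels tmp_idx (out-labels) and backward labels r_tmp_idx (in-labels),
 * a point-to-point distance is answered as
 *
 *   dist(s, t) = min over common hubs h of ( d_fwd(s, h) + d_bwd(h, t) )
 *
 * so a forward candidate of v must be checked against the *backward*
 * labels r_tmp_idx[w], and a backward candidate against the *forward*
 * labels tmp_idx[w]; each side is pruned exactly when the opposite
 * family already answers the query at least as well.
 * ---------------------------------------------------------------------- */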
							break;
						}
					}
				}
				for (set<NodeID>::iterator it = appendCandidate.begin(); it != appendCandidate.end(); ++it) {
					NodeID w = *it;
					if (r_dst_r[w] == INF_WEIGHT) continue;
					r_tmp_idx_v.first.push_back(w);
					r_tmp_idx_v.second.push_back(r_dst_r[w]);
					r_dst_r[w] = INF_WEIGHT;
				}
				appendCandidate.clear();
				r_tmp_idx_v.first.push_back(v);
				r_tmp_idx_v.second.push_back(0);
			}
		}
		int added = 0;
		for (size_t v = 0; v < numOfVertices; ++v) {
			tmp_idx[v].first.push_back(numOfVertices);
			tmp_idx[v].second.push_back(INF_WEIGHT);
			NodeID k = tmp_idx[v].first.size();
			index_[inv[v]].spt_v.resize(k);
			index_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_v[i] = tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) index_[inv[v]].spt_d[i] = tmp_idx[v].second[i];
			tmp_idx[v].first.clear();
			tmp_idx[v].second.clear();
			added += k;
			r_tmp_idx[v].first.push_back(numOfVertices);
			r_tmp_idx[v].second.push_back(INF_WEIGHT);
			k = r_tmp_idx[v].first.size();
			bindex_[inv[v]].spt_v.resize(k);
			bindex_[inv[v]].spt_d.resize(k);
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_v[i] = r_tmp_idx[v].first[i];
			for (NodeID i = 0; i < k; ++i) bindex_[inv[v]].spt_d[i] = r_tmp_idx[v].second[i];
			r_tmp_idx[v].first.clear();
			r_tmp_idx[v].second.clear();
			added += k;
		}
		cout << added << endl;
	}

	Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting, const double SWITCH_DEGREE_PARA, const int HOP_LIMIT_PARA) {
		iteration_generated.resize(numOfVertices);
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		unordered_set<NodeID> visited_set;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		vector<bool> hitTarget(numOfVertices); // false
		vector<bool> vis(numOfVertices);
		contracted.resize(numOfVertices, false);
		possibleWitness.resize(numOfVertices, INF_WEIGHT);
		mtmBucket.resize(numOfVertices); // Temporary structures for Dijkstra searches.
		NodeID remainNode = numOfVertices;
		int currentEdges = relabelByOrder(chgraph, orders);
		int hopLimitsParameter = 1;
		double currentDegree = (double)currentEdges / (double)numOfVertices;
		time_contracting = GetCurrentTimeSec();
		// The main loop picks all vertices from the least to the most important one.
		for (NodeID v = numOfVertices - 1; v > 0; --v) {
			if (v % (numOfVertices / 10) == 0)
				cout << "Building shortcuts for the " << v << "-th vertex (" << orders.inv[v] << "). Total shortcut ratio so far: " << (double)currentEdges / (double)numOfEdges << " time: " << GetCurrentTimeSec() - time_contracting << "s" << endl;
			contracted[v] = true;
			double time_thisround = GetCurrentTimeSec();
			int addShortcuts = witness_search(v, chgraph, hopLimitsParameter, orders, vis, pqueue, visited_set, distances, hitTarget);
			iteration_generated[v] = GetCurrentTimeSec() - time_thisround;
			currentEdges += addShortcuts;
			currentDegree = (double)currentEdges / (double)v;
			if (HOP_LIMIT_PARA == 2 || HOP_LIMIT_PARA == 1 || HOP_LIMIT_PARA == 5) {
				hopLimitsParameter = HOP_LIMIT_PARA;
				continue;
			}
			if (currentDegree > 3.3 && currentDegree <= SWITCH_DEGREE_PARA && hopLimitsParameter != 2) {
				hopLimitsParameter = 2;
				cout << "At vertex " << v << ". Switch to the 2-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
			else if (currentDegree <= 3.3 && hopLimitsParameter != 1) {
				hopLimitsParameter = 1;
				cout << "At vertex " << v << ". Switch to the 1-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
			else if (hopLimitsParameter != 5 && currentDegree > SWITCH_DEGREE_PARA) {
				hopLimitsParameter = 5;
				cout << "At vertex " << v << ". Switch to the multi-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
		}
		time_contracting = GetCurrentTimeSec() - time_contracting;
		cout << "Spent " << time_contracting << " s on creating shortcuts..." << endl;
		cout << "Start to label vertices..." << endl;
		labeling(chgraph, orders);
	}

	Bottomup(CHGraph &chgraph, Ordering &orders, bool directed_flag, double& time_contracting, const double SWITCH_DEGREE_PARA, const int HOP_LIMIT_PARA) {
		benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices);
		unordered_set<NodeID> visited_set;
		vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT);
		vector<bool> hitTarget(numOfVertices); // false
		vector<bool> vis(numOfVertices);
		iteration_generated.resize(numOfVertices);
		contracted.resize(numOfVertices, false);
		possibleWitness.resize(numOfVertices, INF_WEIGHT);
		mtmBucket.resize(numOfVertices); // Temporary structures for Dijkstra searches.
		NodeID remainNode = numOfVertices;
		int currentEdges = relabelByOrder(chgraph, orders);
		int hopLimitsParameter = 1;
		double currentDegree = (double)currentEdges / (double)numOfVertices;
		time_contracting = GetCurrentTimeSec();
		// The main loop picks all vertices from the least to the most important one.
		for (NodeID v = numOfVertices - 1; v > 0; --v) {
			if (v % (numOfVertices / 10) == 0)
				cout << "Building shortcuts for the " << v << "-th vertex (" << orders.inv[v] << "). Total shortcut ratio so far: " << (double)currentEdges / (double)numOfEdges << " time: " << GetCurrentTimeSec() - time_contracting << "s" << endl;
			contracted[v] = true;
			double time_thisround = GetCurrentTimeSec();
			int addShortcuts = witness_search_directed(v, chgraph, hopLimitsParameter, orders, vis, pqueue, visited_set, distances, hitTarget);
			iteration_generated[v] = GetCurrentTimeSec() - time_thisround;
			currentEdges += addShortcuts;
			currentDegree = (double)currentEdges / (double)v;
			if (HOP_LIMIT_PARA == 2 || HOP_LIMIT_PARA == 1 || HOP_LIMIT_PARA == 5) {
				hopLimitsParameter = HOP_LIMIT_PARA;
				continue;
			}
			if (currentDegree > 3.3 && currentDegree <= SWITCH_DEGREE_PARA && hopLimitsParameter != 2) {
				hopLimitsParameter = 2;
				cout << "At vertex " << v << ". Switch to the 2-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
			else if (currentDegree <= 3.3 && hopLimitsParameter != 1) {
				hopLimitsParameter = 1;
				cout << "At vertex " << v << ". Switch to the 1-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
			else if (hopLimitsParameter != 5 && currentDegree > SWITCH_DEGREE_PARA) {
				hopLimitsParameter = 5;
				cout << "At vertex " << v << ". Switch to the multi-hop limit with average degree: " << currentDegree << " #edges: " << currentEdges << endl;
			}
		}
		time_contracting = GetCurrentTimeSec() - time_contracting;
		cout << "Spent " << time_contracting << " s on creating shortcuts..." << endl;
		cout << "Start to label vertices..."
<< endl; labeling_directed(chgraph, orders); } Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting) { vector<vector<CHGraph::CH_Edge> > shortcuts(numOfVertices); vector<vector<CHGraph::CH_Edge> > r_shortcuts(numOfVertices); //benchmark::heap<2, EdgeWeight, NodeID> pqueue(numOfVertices); //unordered_set<NodeID> visited_set; //vector<EdgeWeight> distances(numOfVertices, INF_WEIGHT); //vector<bool> hitTarget(numOfVertices);// false //vector<bool> vis(numOfVertices); //iteration_generated.resize(numOfVertices); //vector<NodeID> max_parents(numOfVertices, numOfVertices); //vector<bool> isNeighbor(numOfVertices, false); //vector<bool> waitForPop(numOfVertices, false); // //vector<NodeID> que(numOfVertices); relabelByOrder(chgraph, orders); long total_shortcut = 0; time_contracting = GetCurrentTimeSec(); double time_shortcuting = GetCurrentTimeSec(); int num_threads = 10; omp_set_num_threads(num_threads); vector<vector<vector<CHGraph::CH_Edge> > > shortcuts_omp(num_threads, vector<vector<CHGraph::CH_Edge> >(numOfVertices)); vector<vector<vector<CHGraph::CH_Edge> > > r_shortcuts_omp(num_threads, vector<vector<CHGraph::CH_Edge> >(numOfVertices)); vector<benchmark::heap<2, EdgeWeight, NodeID> > pqueue(num_threads, benchmark::heap<2, EdgeWeight, NodeID>(numOfVertices) ); vector<unordered_set<NodeID> > visited_set(num_threads); vector<vector<EdgeWeight> > distances(num_threads, vector<EdgeWeight>(numOfVertices, INF_WEIGHT)); vector<vector<bool> > hitTarget(num_threads, vector<bool>(numOfVertices) );// false vector<vector<bool> > vis(num_threads, vector<bool>(numOfVertices)); iteration_generated.resize(numOfVertices); vector<vector<NodeID> > max_parents(num_threads, vector<NodeID>(numOfVertices, numOfVertices)); vector<vector<bool> > isNeighbor(num_threads, vector<bool>(numOfVertices, false)); vector<vector<bool> > waitForPop(num_threads, vector<bool>(numOfVertices, false)); vector<vector<NodeID> > que(num_threads, vector<NodeID>(numOfVertices)); #pragma omp parallel for schedule(dynamic) for (NodeID v = numOfVertices - 1; v > 0; --v) { if (WEIGHTED_FLAG == true) { //build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, shortcuts); build_shortcuts_dij(v, chgraph.adj, vis[omp_get_thread_num()], pqueue[omp_get_thread_num()], visited_set[omp_get_thread_num()], distances[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], waitForPop[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()]); if (DIRECTED_FLAG == true) // build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, r_shortcuts); build_shortcuts_dij(v, chgraph.r_adj, vis[omp_get_thread_num()], pqueue[omp_get_thread_num()], visited_set[omp_get_thread_num()], distances[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], waitForPop[omp_get_thread_num()], r_shortcuts_omp[omp_get_thread_num()]); } else { build_shortcuts_bfs(v, chgraph.adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]); // build_shortcuts_bfs(v, chgraph.adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]); if (DIRECTED_FLAG == true) build_shortcuts_bfs(v, chgraph.r_adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], 
r_shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]); //build_shortcuts_bfs(v, chgraph.r_adj, vis[omp_get_thread_num()], max_parents[omp_get_thread_num()], isNeighbor[omp_get_thread_num()], r_shortcuts_omp[omp_get_thread_num()], que[omp_get_thread_num()]); } } for (int t = 0; t < num_threads; ++t) { for (int v = 0; v < numOfVertices; ++v) { for (int i = 0; i < shortcuts_omp[t][v].size(); ++i) { shortcuts[v].push_back(shortcuts_omp[t][v][i]); } for (int i = 0; i < r_shortcuts_omp[t][v].size(); ++i) { r_shortcuts[v].push_back(r_shortcuts_omp[t][v][i]); } } } //for (NodeID v = numOfVertices - 1; v > 0; --v) { // if (WEIGHTED_FLAG == true) { // //build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, shortcuts); // build_shortcuts_dij(v, chgraph.adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, waitForPop, shortcuts); // if (DIRECTED_FLAG == true) // // build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, r_shortcuts); // build_shortcuts_dij(v, chgraph.r_adj, vis, pqueue, visited_set, distances, max_parents, isNeighbor, waitForPop, r_shortcuts); // } else { // build_shortcuts_bfs(v, chgraph.adj, vis, max_parents, isNeighbor, shortcuts, que); // if (DIRECTED_FLAG == true) // build_shortcuts_bfs(v, chgraph.r_adj, vis, max_parents, isNeighbor, r_shortcuts, que); // } //} double time_searching = GetCurrentTimeSec() - time_contracting; cout << "Spent " << time_searching << " s on searching shortcuts..." << endl; if (DIRECTED_FLAG == false) total_shortcut += process_shortcuts(chgraph, shortcuts); else total_shortcut += process_shortcuts_directed(chgraph, shortcuts, r_shortcuts); time_contracting = GetCurrentTimeSec() - time_contracting - time_searching; cout << "Spent " << time_contracting << " s on processing shortcuts..." << endl; cout << "Start to label vertices..." << endl; cout << total_shortcut << " shortcuts in total." << endl; time_shortcuting = GetCurrentTimeSec() - time_shortcuting; time_contracting = time_shortcuting; if (DIRECTED_FLAG == false) td_labeling(chgraph, orders); //labeling(chgraph, orders); else labeling_directed(chgraph, orders); } Bottomup(CHGraph &chgraph, Ordering &orders, double& time_contracting, bool CH_ORDER_FLAGS) { relabelByOrder(chgraph, orders); labeling(chgraph, orders); } long process_shortcuts(CHGraph& chgraph, vector<vector<CHGraph::CH_Edge> > shortcuts) { vector<vector<bool> > add_out(numOfVertices); vector<vector<bool> > add_in(numOfVertices); long added_shortcuts = 0; // Test whether shortcut can replace the original edges. 
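/* ----------------------------------------------------------------------
 * Pattern used by the parallel constructor above: each OpenMP thread
 * appends shortcuts only to its own shortcuts_omp[tid] slot, so the
 * dynamically scheduled loop needs no locking, and the slots are merged
 * serially afterwards. Minimal form of the idiom (Item and compute() are
 * placeholders, num_threads assumed set beforehand):
 *
 *   vector<vector<Item> > local(num_threads);
 *   #pragma omp parallel for schedule(dynamic)
 *   for (int v = 0; v < n; ++v) {
 *       int tid = omp_get_thread_num();     // cache once per iteration
 *       local[tid].push_back(compute(v));   // race-free: one writer per slot
 *   }
 *   for (int t = 0; t < num_threads; ++t)
 *       merged.insert(merged.end(), local[t].begin(), local[t].end());
 *
 * Caching omp_get_thread_num() once per iteration, as in the sketch,
 * would avoid the repeated calls in the loop above without changing
 * behavior.
 * ---------------------------------------------------------------------- */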
		for (NodeID v = 0; v < numOfVertices; ++v) {
			vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
			vector<bool>& add_out_v = add_out[v];
			vector<bool>& add_in_v = add_in[v];
			add_out_v.resize(shortcuts_v.size(), true);
			add_in_v.resize(shortcuts_v.size(), true);
			for (int i = 0; i < shortcuts_v.size(); ++i) {
				NodeID w = shortcuts_v[i].target;
				vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.adj[v].begin(), chgraph.adj[v].end(), shortcuts_v[i]);
				if (pos != chgraph.adj[v].end() && (*pos).target == shortcuts_v[i].target) {
					if ((*pos).weight > shortcuts_v[i].weight)
						(*pos).weight = shortcuts_v[i].weight;
					add_out_v[i] = false;
				}
				CHGraph::CH_Edge dummy(v, 0, 0);
				pos = lower_bound(chgraph.adj[w].begin(), chgraph.adj[w].end(), dummy);
				if (pos != chgraph.adj[w].end() && (*pos).target == v) {
					if ((*pos).weight > shortcuts_v[i].weight)
						(*pos).weight = shortcuts_v[i].weight;
					add_in_v[i] = false;
				}
			}
		}

		// Append the actual shortcuts.
		for (NodeID v = 0; v < numOfVertices; ++v) {
			vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
			vector<bool>& add_out_v = add_out[v];
			vector<bool>& add_in_v = add_in[v];
			for (int i = 0; i < shortcuts_v.size(); ++i) {
				NodeID w = shortcuts_v[i].target;
				NodeID level = shortcuts_v[i].level;
				EdgeWeight weight = shortcuts_v[i].weight;
				if (add_out_v[i] == true)
					chgraph.adj[v].push_back(CHGraph::CH_Edge(w, level, weight));
				if (add_in_v[i] == true)
					chgraph.adj[w].push_back(CHGraph::CH_Edge(v, level, weight));
				// Count each shortcut once, unless it only tightened existing
				// edges in both directions and nothing was appended.
				if (add_out_v[i] == false && add_in_v[i] == false)
					added_shortcuts--;
				added_shortcuts++;
			}
		}
		return added_shortcuts;
	}

	long process_shortcuts_directed(CHGraph& chgraph, vector<vector<CHGraph::CH_Edge> >& shortcuts, vector<vector<CHGraph::CH_Edge> >& r_shortcuts) {
		vector<vector<bool> > add_out(numOfVertices);
		vector<vector<bool> > add_in(numOfVertices);
		vector<vector<bool> > r_add_out(numOfVertices);
		vector<vector<bool> > r_add_in(numOfVertices);
		long added_shortcuts = 0;

		// Test whether shortcut can replace the original edges.
		for (NodeID v = 0; v < numOfVertices; ++v) {
			{ // Forward shortcut from (v->w), so add this shortcut into adj[v] and r_adj[w].
				vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v];
				vector<bool>& add_out_v = add_out[v];
				vector<bool>& add_in_v = add_in[v];
				add_out_v.resize(shortcuts_v.size(), true);
				add_in_v.resize(shortcuts_v.size(), true);
				for (int i = 0; i < shortcuts_v.size(); ++i) {
					NodeID w = shortcuts_v[i].target;
					vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.adj[v].begin(), chgraph.adj[v].end(), shortcuts_v[i]);
					if (pos != chgraph.adj[v].end() && (*pos).target == shortcuts_v[i].target) {
						if ((*pos).weight > shortcuts_v[i].weight)
							(*pos).weight = shortcuts_v[i].weight;
						add_out_v[i] = false;
					}
					CHGraph::CH_Edge dummy(v, 0, 0);
					pos = lower_bound(chgraph.r_adj[w].begin(), chgraph.r_adj[w].end(), dummy);
					if (pos != chgraph.r_adj[w].end() && (*pos).target == v) {
						if ((*pos).weight > shortcuts_v[i].weight)
							(*pos).weight = shortcuts_v[i].weight;
						add_in_v[i] = false;
					}
				}
			}
			{ // Backward shortcuts, add (w->v) into adj[w] and r_adj[v].
vector<CHGraph::CH_Edge>& r_shortcuts_v = r_shortcuts[v]; vector<bool>& r_add_out_v = r_add_out[v]; vector<bool>& r_add_in_v = r_add_in[v]; r_add_out_v.resize(r_shortcuts_v.size(), true); r_add_in_v.resize(r_shortcuts_v.size(), true); for (int i = 0; i < r_shortcuts_v.size(); ++i) { NodeID w = r_shortcuts_v[i].target; vector<CHGraph::CH_Edge>::iterator pos = lower_bound(chgraph.r_adj[v].begin(), chgraph.r_adj[v].end(), r_shortcuts_v[i]); if (pos != chgraph.r_adj[v].end() && (*pos).target == r_shortcuts_v[i].target) { if ((*pos).weight > r_shortcuts_v[i].weight) (*pos).weight = r_shortcuts_v[i].weight; r_add_in_v[i] = false; } CHGraph::CH_Edge dummy(v, 0, 0); pos = lower_bound(chgraph.adj[w].begin(), chgraph.adj[w].end(), dummy); if (pos != chgraph.adj[w].end() && (*pos).target == v) { if ((*pos).weight > r_shortcuts_v[i].weight) (*pos).weight = r_shortcuts_v[i].weight; r_add_out_v[i] = false; } } } } // Append the actual shortcuts. for (NodeID v = 0; v < numOfVertices; ++v) { {//Forward shortcuts, add (v->w) to adj[v] and r_adj[w]. vector<CHGraph::CH_Edge>& shortcuts_v = shortcuts[v]; vector<bool>& add_out_v = add_out[v]; vector<bool>& add_in_v = add_in[v]; for (int i = 0; i < shortcuts_v.size(); ++i) { NodeID w = shortcuts_v[i].target; NodeID level = shortcuts_v[i].level; EdgeWeight weight = shortcuts_v[i].weight; if (add_out_v[i] == true) { chgraph.adj[v].push_back(CHGraph::CH_Edge(w, level, weight)); // cout << v << "," << w << "," << weight << endl; } if (add_in_v[i] == true) { chgraph.r_adj[w].push_back(CHGraph::CH_Edge(v, level, weight)); // cout << w << "," << v << "," << weight << endl; } if (add_out_v[i] == false && add_in_v[i] == false) added_shortcuts--; added_shortcuts++; } } {// Backward shortcuts, add (w->v) to adj[w] and r_adj[v]. vector<CHGraph::CH_Edge>& r_shortcuts_v = r_shortcuts[v]; vector<bool>& r_add_out_v = r_add_out[v]; vector<bool>& r_add_in_v = r_add_in[v]; for (int i = 0; i < r_shortcuts_v.size(); ++i) { NodeID w = r_shortcuts_v[i].target; NodeID level = r_shortcuts_v[i].level; EdgeWeight weight = r_shortcuts_v[i].weight; if (r_add_out_v[i] == true) { chgraph.adj[w].push_back(CHGraph::CH_Edge(v, level, weight)); // cout << w << "," << v << "," << weight << endl; } if (r_add_in_v[i] == true) { chgraph.r_adj[v].push_back(CHGraph::CH_Edge(w, level, weight)); // cout << v << "," << w << "," << weight << endl; } if (r_add_out_v[i] == false && r_add_in_v[i] == false) added_shortcuts--; added_shortcuts++; } } } return added_shortcuts; } }; #endif
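/* The Bottomup routine above relies on a simple OpenMP idiom: each thread
   appends the shortcuts it finds to its own private buffer (shortcuts_omp),
   and the buffers are merged serially after the parallel loop, so the
   search needs no locking.  Below is a minimal stand-alone sketch of that
   idiom; NUM_THREADS, NUM_ITEMS and produce() are hypothetical stand-ins,
   not names from the code above.  */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NUM_THREADS 4
#define NUM_ITEMS   1000

static int produce(int item) { return item * item; } /* placeholder work */

int main(void) {
  int *buf[NUM_THREADS];        /* one private result buffer per thread */
  int cnt[NUM_THREADS] = { 0 }; /* number of results per thread */
  int merged[NUM_ITEMS];
  int total = 0;
  int t, i;

  for (t = 0; t < NUM_THREADS; ++t)
    buf[t] = malloc(NUM_ITEMS * sizeof(int));

  omp_set_num_threads(NUM_THREADS);
  /* Each thread writes only to buf[tid] and cnt[tid]: no races. */
#pragma omp parallel for schedule(dynamic)
  for (i = 0; i < NUM_ITEMS; ++i) {
    int tid = omp_get_thread_num();
    buf[tid][cnt[tid]++] = produce(i);
  }

  /* Serial merge, mirroring the shortcuts_omp -> shortcuts copy above. */
  for (t = 0; t < NUM_THREADS; ++t) {
    for (i = 0; i < cnt[t]; ++i)
      merged[total++] = buf[t][i];
    free(buf[t]);
  }

  printf("merged %d results\n", total);
  return 0;
}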
if-clauseModificado.c
/*
$ export OMP_NUM_THREADS=3
$ ./bin/if-clause 4
 thread 0 sum of a[0]=0 sumalocal=0
 thread 0 sum of a[1]=1 sumalocal=1
 thread 0 sum of a[2]=2 sumalocal=3
 thread 0 sum of a[3]=3 sumalocal=6
thread master=0 prints suma=6
Since if(n>4) is false here, no fork/join of threads takes place and the
parallel region runs entirely on the master thread.

$ ./bin/if-clause 5
 thread 0 sum of a[0]=0 sumalocal=0
 thread 0 sum of a[1]=1 sumalocal=1
 thread 1 sum of a[2]=2 sumalocal=2
 thread 1 sum of a[3]=3 sumalocal=5
 thread 2 sum of a[4]=4 sumalocal=4
thread master=0 prints suma=10

Run with omp_set_num_threads(5);
----------------------------------------------
$ ./bin/if-clause 10
 thread 4 sum of a[8]=8 sumalocal=8
 thread 4 sum of a[9]=9 sumalocal=17
 thread 1 sum of a[2]=2 sumalocal=2
 thread 1 sum of a[3]=3 sumalocal=5
 thread 0 sum of a[0]=0 sumalocal=0
 thread 0 sum of a[1]=1 sumalocal=1
 thread 3 sum of a[6]=6 sumalocal=6
 thread 3 sum of a[7]=7 sumalocal=13
 thread 2 sum of a[4]=4 sumalocal=4
 thread 2 sum of a[5]=5 sumalocal=9
thread master=0 prints suma=45

Run with omp_set_num_threads(2);
----------------------------------------------
$ ./bin/if-clause 10
 thread 0 sum of a[0]=0 sumalocal=0
 thread 0 sum of a[1]=1 sumalocal=1
 thread 0 sum of a[2]=2 sumalocal=3
 thread 0 sum of a[3]=3 sumalocal=6
 thread 0 sum of a[4]=4 sumalocal=10
 thread 1 sum of a[5]=5 sumalocal=5
 thread 1 sum of a[6]=6 sumalocal=11
 thread 1 sum of a[7]=7 sumalocal=18
 thread 1 sum of a[8]=8 sumalocal=26
 thread 1 sum of a[9]=9 sumalocal=35
thread master=0 prints suma=45

If the number of threads is not specified anywhere, one thread per core is
used; the OMP_NUM_THREADS environment variable overrides that default, and
a call to omp_set_num_threads() overrides both. Precedence:
1- omp_set_num_threads()
2- export OMP_NUM_THREADS
3- default: one thread per core
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv){
   int i, n=20, tid, h;
   int a[n],suma=0,sumalocal;
   if(argc < 3) {
      fprintf(stderr,"[ERROR]-Missing iteration count and num_threads\n");
      exit(-1);
   }
   n = atoi(argv[1]); if (n>20) n=20;
   h = atoi(argv[2]); if (h>4) h=4;
   for (i=0; i<n; i++) {
      a[i] = i;
   }
   // the number of threads can also be set directly
   omp_set_num_threads(h);
   #pragma omp parallel if(n>4) default(none) private(sumalocal,tid) shared(a,suma,n)
   {
      sumalocal=0;
      tid=omp_get_thread_num();
      // nowait: the threads need not wait for each other at the end of the
      // for, because the atomic below already serializes them while each
      // one writes its partial result into suma
      #pragma omp for private(i) schedule(static) nowait
      for (i=0; i<n; i++){
         sumalocal += a[i];
         printf(" thread %d sum of a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal);
      }
      #pragma omp atomic
      suma += sumalocal;
      #pragma omp barrier
      #pragma omp master
      printf("thread master=%d prints suma=%d\n",tid,suma);
   }
}
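/* For comparison, the same sum can be written with a reduction clause,
   which makes the runtime manage the per-thread partial sums that the
   program above keeps by hand in sumalocal with atomic + nowait + barrier.
   A minimal sketch (the fixed n = 10 is just for illustration):  */
#include <stdio.h>
#include <omp.h>

int main(void) {
  int n = 10, suma = 0;
  int a[10];
  int i;

  for (i = 0; i < n; i++)
    a[i] = i;

  /* reduction(+:suma) gives every thread a private copy of suma,
     initialized to 0, and adds all copies into the original variable
     at the end of the region -- no explicit atomic or barrier needed. */
#pragma omp parallel for reduction(+:suma) if(n > 4)
  for (i = 0; i < n; i++)
    suma += a[i];

  printf("suma=%d\n", suma);
  return 0;
}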
omp-expand.c
/* Expansion pass for OMP directives. Outlines regions of certain OMP directives to separate functions, converts others into explicit calls to the runtime library (libgomp) and so forth Copyright (C) 2005-2018 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "memmodel.h" #include "backend.h" #include "target.h" #include "rtl.h" #include "tree.h" #include "gimple.h" #include "cfghooks.h" #include "tree-pass.h" #include "ssa.h" #include "optabs.h" #include "cgraph.h" #include "pretty-print.h" #include "diagnostic-core.h" #include "fold-const.h" #include "stor-layout.h" #include "cfganal.h" #include "internal-fn.h" #include "gimplify.h" #include "gimple-iterator.h" #include "gimplify-me.h" #include "gimple-walk.h" #include "tree-cfg.h" #include "tree-into-ssa.h" #include "tree-ssa.h" #include "splay-tree.h" #include "cfgloop.h" #include "omp-general.h" #include "omp-offload.h" #include "tree-cfgcleanup.h" #include "symbol-summary.h" #include "gomp-constants.h" #include "gimple-pretty-print.h" #include "hsa-common.h" #include "debug.h" #include "stringpool.h" #include "attribs.h" /* OMP region information. Every parallel and workshare directive is enclosed between two markers, the OMP_* directive and a corresponding GIMPLE_OMP_RETURN statement. */ struct omp_region { /* The enclosing region. */ struct omp_region *outer; /* First child region. */ struct omp_region *inner; /* Next peer region. */ struct omp_region *next; /* Block containing the omp directive as its last stmt. */ basic_block entry; /* Block containing the GIMPLE_OMP_RETURN as its last stmt. */ basic_block exit; /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt. */ basic_block cont; /* If this is a combined parallel+workshare region, this is a list of additional arguments needed by the combined parallel+workshare library call. */ vec<tree, va_gc> *ws_args; /* The code for the omp directive of this region. */ enum gimple_code type; /* Schedule kind, only used for GIMPLE_OMP_FOR type regions. */ enum omp_clause_schedule_kind sched_kind; /* Schedule modifiers. */ unsigned char sched_modifiers; /* True if this is a combined parallel+workshare region. */ bool is_combined_parallel; /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has a depend clause. */ gomp_ordered *ord_stmt; }; static struct omp_region *root_omp_region; static bool omp_any_child_fn_dumped; static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree, bool = false); static gphi *find_phi_with_arg_on_edge (tree, edge); static void expand_omp (struct omp_region *region); /* Return true if REGION is a combined parallel+workshare region. 
*/

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  */

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule)
    return chunk_size;

  poly_uint64 vf = omp_max_vf ();
  if (known_eq (vf, 1U))
    return chunk_size;

  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
			    build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
		      build_int_cst (type, -vf));
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.
*/ static vec<tree, va_gc> * get_ws_args_for (gimple *par_stmt, gimple *ws_stmt) { tree t; location_t loc = gimple_location (ws_stmt); vec<tree, va_gc> *ws_args; if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt)) { struct omp_for_data fd; tree n1, n2; omp_extract_for_data (for_stmt, &fd, NULL); n1 = fd.loop.n1; n2 = fd.loop.n2; if (gimple_omp_for_combined_into_p (for_stmt)) { tree innerc = omp_find_clause (gimple_omp_parallel_clauses (par_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } vec_alloc (ws_args, 3 + (fd.chunk_size != 0)); t = fold_convert_loc (loc, long_integer_type_node, n1); ws_args->quick_push (t); t = fold_convert_loc (loc, long_integer_type_node, n2); ws_args->quick_push (t); t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step); ws_args->quick_push (t); if (fd.chunk_size) { t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size); t = omp_adjust_chunk_size (t, fd.simd_schedule); ws_args->quick_push (t); } return ws_args; } else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS) { /* Number of sections is equal to the number of edges from the GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to the exit of the sections region. */ basic_block bb = single_succ (gimple_bb (ws_stmt)); t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1); vec_alloc (ws_args, 1); ws_args->quick_push (t); return ws_args; } gcc_unreachable (); } /* Discover whether REGION is a combined parallel+workshare region. */ static void determine_parallel_type (struct omp_region *region) { basic_block par_entry_bb, par_exit_bb; basic_block ws_entry_bb, ws_exit_bb; if (region == NULL || region->inner == NULL || region->exit == NULL || region->inner->exit == NULL || region->inner->cont == NULL) return; /* We only support parallel+for and parallel+sections. */ if (region->type != GIMPLE_OMP_PARALLEL || (region->inner->type != GIMPLE_OMP_FOR && region->inner->type != GIMPLE_OMP_SECTIONS)) return; /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and WS_EXIT_BB -> PAR_EXIT_BB. */ par_entry_bb = region->entry; par_exit_bb = region->exit; ws_entry_bb = region->inner->entry; ws_exit_bb = region->inner->exit; if (single_succ (par_entry_bb) == ws_entry_bb && single_succ (ws_exit_bb) == par_exit_bb && workshare_safe_to_combine_p (ws_entry_bb) && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb)) || (last_and_only_stmt (ws_entry_bb) && last_and_only_stmt (par_exit_bb)))) { gimple *par_stmt = last_stmt (par_entry_bb); gimple *ws_stmt = last_stmt (ws_entry_bb); if (region->inner->type == GIMPLE_OMP_FOR) { /* If this is a combined parallel loop, we need to determine whether or not to use the combined library calls. There are two cases where we do not apply the transformation: static loops and any kind of ordered loop. In the first case, we already open code the loop so there is no need to do anything else. In the latter case, the combined parallel loop call would still need extra synchronization to implement ordered semantics, so there would not be any gain in using the combined call. 
*/ tree clauses = gimple_omp_for_clauses (ws_stmt); tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE); if (c == NULL || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK) == OMP_CLAUSE_SCHEDULE_STATIC) || omp_find_clause (clauses, OMP_CLAUSE_ORDERED)) { region->is_combined_parallel = false; region->inner->is_combined_parallel = false; return; } } region->is_combined_parallel = true; region->inner->is_combined_parallel = true; region->ws_args = get_ws_args_for (par_stmt, ws_stmt); } } /* Debugging dumps for parallel regions. */ void dump_omp_region (FILE *, struct omp_region *, int); void debug_omp_region (struct omp_region *); void debug_all_omp_regions (void); /* Dump the parallel region tree rooted at REGION. */ void dump_omp_region (FILE *file, struct omp_region *region, int indent) { fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index, gimple_code_name[region->type]); if (region->inner) dump_omp_region (file, region->inner, indent + 4); if (region->cont) { fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "", region->cont->index); } if (region->exit) fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "", region->exit->index); else fprintf (file, "%*s[no exit marker]\n", indent, ""); if (region->next) dump_omp_region (file, region->next, indent); } DEBUG_FUNCTION void debug_omp_region (struct omp_region *region) { dump_omp_region (stderr, region, 0); } DEBUG_FUNCTION void debug_all_omp_regions (void) { dump_omp_region (stderr, root_omp_region, 0); } /* Create a new parallel region starting at STMT inside region PARENT. */ static struct omp_region * new_omp_region (basic_block bb, enum gimple_code type, struct omp_region *parent) { struct omp_region *region = XCNEW (struct omp_region); region->outer = parent; region->entry = bb; region->type = type; if (parent) { /* This is a nested region. Add it to the list of inner regions in PARENT. */ region->next = parent->inner; parent->inner = region; } else { /* This is a toplevel region. Add it to the list of toplevel regions in ROOT_OMP_REGION. */ region->next = root_omp_region; root_omp_region = region; } return region; } /* Release the memory associated with the region tree rooted at REGION. */ static void free_omp_region_1 (struct omp_region *region) { struct omp_region *i, *n; for (i = region->inner; i ; i = n) { n = i->next; free_omp_region_1 (i); } free (region); } /* Release the memory for the entire omp region tree. */ void omp_free_regions (void) { struct omp_region *r, *n; for (r = root_omp_region; r ; r = n) { n = r->next; free_omp_region_1 (r); } root_omp_region = NULL; } /* A convenience function to build an empty GIMPLE_COND with just the condition. */ static gcond * gimple_build_cond_empty (tree cond) { enum tree_code pred_code; tree lhs, rhs; gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs); return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE); } /* Return true if a parallel REGION is within a declare target function or within a target region and is not a part of a gridified target. 
*/ static bool parallel_needs_hsa_kernel_p (struct omp_region *region) { bool indirect = false; for (region = region->outer; region; region = region->outer) { if (region->type == GIMPLE_OMP_PARALLEL) indirect = true; else if (region->type == GIMPLE_OMP_TARGET) { gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (region->entry)); if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)) return indirect; else return true; } } if (lookup_attribute ("omp declare target", DECL_ATTRIBUTES (current_function_decl))) return true; return false; } /* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function. Add CHILD_FNDECL to decl chain of the supercontext of the block ENTRY_BLOCK - this is the block which originally contained the code from which CHILD_FNDECL was created. Together, these actions ensure that the debug info for the outlined function will be emitted with the correct lexical scope. */ static void adjust_context_and_scope (tree entry_block, tree child_fndecl) { if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK) { tree b = BLOCK_SUPERCONTEXT (entry_block); if (TREE_CODE (b) == BLOCK) { tree parent_fndecl; /* Follow supercontext chain until the parent fndecl is found. */ for (parent_fndecl = BLOCK_SUPERCONTEXT (b); TREE_CODE (parent_fndecl) == BLOCK; parent_fndecl = BLOCK_SUPERCONTEXT (parent_fndecl)) ; gcc_assert (TREE_CODE (parent_fndecl) == FUNCTION_DECL); DECL_CONTEXT (child_fndecl) = parent_fndecl; DECL_CHAIN (child_fndecl) = BLOCK_VARS (b); BLOCK_VARS (b) = child_fndecl; } } } /* Build the function calls to GOMP_parallel_start etc to actually generate the parallel operation. REGION is the parallel region being expanded. BB is the block where to insert the code. WS_ARGS will be set if this is a call to a combined parallel+workshare construct, it contains the list of additional arguments needed by the workshare construct. */ static void expand_parallel_call (struct omp_region *region, basic_block bb, gomp_parallel *entry_stmt, vec<tree, va_gc> *ws_args) { tree t, t1, t2, val, cond, c, clauses, flags; gimple_stmt_iterator gsi; gimple *stmt; enum built_in_function start_ix; int start_ix2; location_t clause_loc; vec<tree, va_gc> *args; clauses = gimple_omp_parallel_clauses (entry_stmt); /* Determine what flavor of GOMP_parallel we will be emitting. */ start_ix = BUILT_IN_GOMP_PARALLEL; if (is_combined_parallel (region)) { switch (region->inner->type) { case GIMPLE_OMP_FOR: gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); switch (region->inner->sched_kind) { case OMP_CLAUSE_SCHEDULE_RUNTIME: start_ix2 = 3; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: case OMP_CLAUSE_SCHEDULE_GUIDED: if (region->inner->sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) { start_ix2 = 3 + region->inner->sched_kind; break; } /* FALLTHRU */ default: start_ix2 = region->inner->sched_kind; break; } start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC; start_ix = (enum built_in_function) start_ix2; break; case GIMPLE_OMP_SECTIONS: start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS; break; default: gcc_unreachable (); } } /* By default, the value of NUM_THREADS is zero (selected at run time) and there is no conditional. 
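   For example, a plain '#pragma omp parallel' passes 0 and lets the
   runtime pick the team size, while '#pragma omp parallel if (p)
   num_threads (4)' ends up passing 'p ? 4 : 1' as the thread-count
   argument of the GOMP_parallel call built below.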
*/ cond = NULL_TREE; val = build_int_cst (unsigned_type_node, 0); flags = build_int_cst (unsigned_type_node, 0); c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS); if (c) { val = OMP_CLAUSE_NUM_THREADS_EXPR (c); clause_loc = OMP_CLAUSE_LOCATION (c); } else clause_loc = gimple_location (entry_stmt); c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND); if (c) flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c)); /* Ensure 'val' is of the correct type. */ val = fold_convert_loc (clause_loc, unsigned_type_node, val); /* If we found the clause 'if (cond)', build either (cond != 0) or (cond ? val : 1u). */ if (cond) { cond = gimple_boolify (cond); if (integer_zerop (val)) val = fold_build2_loc (clause_loc, EQ_EXPR, unsigned_type_node, cond, build_int_cst (TREE_TYPE (cond), 0)); else { basic_block cond_bb, then_bb, else_bb; edge e, e_then, e_else; tree tmp_then, tmp_else, tmp_join, tmp_var; tmp_var = create_tmp_var (TREE_TYPE (val)); if (gimple_in_ssa_p (cfun)) { tmp_then = make_ssa_name (tmp_var); tmp_else = make_ssa_name (tmp_var); tmp_join = make_ssa_name (tmp_var); } else { tmp_then = tmp_var; tmp_else = tmp_var; tmp_join = tmp_var; } e = split_block_after_labels (bb); cond_bb = e->src; bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_start_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); expand_omp_build_assign (&gsi, tmp_then, val, true); gsi = gsi_start_bb (else_bb); expand_omp_build_assign (&gsi, tmp_else, build_int_cst (unsigned_type_node, 1), true); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); e_then = make_edge (then_bb, bb, EDGE_FALLTHRU); e_else = make_edge (else_bb, bb, EDGE_FALLTHRU); if (gimple_in_ssa_p (cfun)) { gphi *phi = create_phi_node (tmp_join, bb); add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION); add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION); } val = tmp_join; } gsi = gsi_start_bb (bb); val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } gsi = gsi_last_nondebug_bb (bb); t = gimple_omp_parallel_data_arg (entry_stmt); if (t == NULL) t1 = null_pointer_node; else t1 = build_fold_addr_expr (t); tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt); t2 = build_fold_addr_expr (child_fndecl); adjust_context_and_scope (gimple_block (entry_stmt), child_fndecl); vec_alloc (args, 4 + vec_safe_length (ws_args)); args->quick_push (t2); args->quick_push (t1); args->quick_push (val); if (ws_args) args->splice (*ws_args); args->quick_push (flags); t = build_call_expr_loc_vec (UNKNOWN_LOCATION, builtin_decl_explicit (start_ix), args); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (hsa_gen_requested_p () && parallel_needs_hsa_kernel_p (region)) { cgraph_node *child_cnode = cgraph_node::get (child_fndecl); hsa_register_kernel (child_cnode); } } /* Build the function call to GOMP_task to actually generate the task operation. BB is the block where to insert the code. 
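   Plain tasks are emitted as calls to GOMP_task; taskloops instead use
   GOMP_taskloop, or GOMP_taskloop_ull when the iterator type is
   unsigned long long.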
*/ static void expand_task_call (struct omp_region *region, basic_block bb, gomp_task *entry_stmt) { tree t1, t2, t3; gimple_stmt_iterator gsi; location_t loc = gimple_location (entry_stmt); tree clauses = gimple_omp_task_clauses (entry_stmt); tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED); tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE); tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL); tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY); unsigned int iflags = (untied ? GOMP_TASK_FLAG_UNTIED : 0) | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0) | (depend ? GOMP_TASK_FLAG_DEPEND : 0); bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt); tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE; tree num_tasks = NULL_TREE; bool ull = false; if (taskloop_p) { gimple *g = last_stmt (region->outer->entry); gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP); struct omp_for_data fd; omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL); startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar), OMP_CLAUSE__LOOPTEMP_); startvar = OMP_CLAUSE_DECL (startvar); endvar = OMP_CLAUSE_DECL (endvar); step = fold_convert_loc (loc, fd.iter_type, fd.loop.step); if (fd.loop.cond_code == LT_EXPR) iflags |= GOMP_TASK_FLAG_UP; tree tclauses = gimple_omp_for_clauses (g); num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS); if (num_tasks) num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks); else { num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE); if (num_tasks) { iflags |= GOMP_TASK_FLAG_GRAINSIZE; num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks); } else num_tasks = integer_zero_node; } num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks); if (ifc == NULL_TREE) iflags |= GOMP_TASK_FLAG_IF; if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP)) iflags |= GOMP_TASK_FLAG_NOGROUP; ull = fd.iter_type == long_long_unsigned_type_node; } else if (priority) iflags |= GOMP_TASK_FLAG_PRIORITY; tree flags = build_int_cst (unsigned_type_node, iflags); tree cond = boolean_true_node; if (ifc) { if (taskloop_p) { tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_IF), build_int_cst (unsigned_type_node, 0)); flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } else cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc)); } if (finalc) { tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc)); t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t, build_int_cst (unsigned_type_node, GOMP_TASK_FLAG_FINAL), build_int_cst (unsigned_type_node, 0)); flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t); } if (depend) depend = OMP_CLAUSE_DECL (depend); else depend = build_int_cst (ptr_type_node, 0); if (priority) priority = fold_convert (integer_type_node, OMP_CLAUSE_PRIORITY_EXPR (priority)); else priority = integer_zero_node; gsi = gsi_last_nondebug_bb (bb); tree t = gimple_omp_task_data_arg (entry_stmt); if (t == NULL) t2 = null_pointer_node; else t2 = build_fold_addr_expr_loc (loc, t); t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt)); t = gimple_omp_task_copy_fn (entry_stmt); if (t == NULL) t3 = null_pointer_node; else t3 = build_fold_addr_expr_loc (loc, 
t); if (taskloop_p) t = build_call_expr (ull ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL) : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP), 11, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), flags, num_tasks, priority, startvar, endvar, step); else t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK), 9, t1, t2, t3, gimple_omp_task_arg_size (entry_stmt), gimple_omp_task_arg_align (entry_stmt), cond, flags, depend, priority); force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */ static tree vec2chain (vec<tree, va_gc> *v) { tree chain = NULL_TREE, t; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t) { DECL_CHAIN (t) = chain; chain = t; } return chain; } /* Remove barriers in REGION->EXIT's block. Note that this is only valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be removed. */ static void remove_exit_barrier (struct omp_region *region) { gimple_stmt_iterator gsi; basic_block exit_bb; edge_iterator ei; edge e; gimple *stmt; int any_addressable_vars = -1; exit_bb = region->exit; /* If the parallel region doesn't return, we don't have REGION->EXIT block at all. */ if (! exit_bb) return; /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of statements that can appear in between are extremely limited -- no memory operations at all. Here, we allow nothing at all, so the only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */ gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gsi_prev_nondebug (&gsi); if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL) return; FOR_EACH_EDGE (e, ei, exit_bb->preds) { gsi = gsi_last_nondebug_bb (e->src); if (gsi_end_p (gsi)) continue; stmt = gsi_stmt (gsi); if (gimple_code (stmt) == GIMPLE_OMP_RETURN && !gimple_omp_return_nowait_p (stmt)) { /* OpenMP 3.0 tasks unfortunately prevent this optimization in many cases. If there could be tasks queued, the barrier might be needed to let the tasks run before some local variable of the parallel that the task uses as shared runs out of scope. The task can be spawned either from within current function (this would be easy to check) or from some function it calls and gets passed an address of such a variable. 
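	     For example, a task that was passed the address of a stack
	     variable of the parallel body through a shared clause may
	     still be reading it; the implicit barrier keeps that variable
	     in scope until the task has run, which is why the
	     GIMPLE_OMP_RETURN is only marked nowait below when no
	     addressable variables are involved.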
*/ if (any_addressable_vars < 0) { gomp_parallel *parallel_stmt = as_a <gomp_parallel *> (last_stmt (region->entry)); tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt); tree local_decls, block, decl; unsigned ix; any_addressable_vars = 0; FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl) if (TREE_ADDRESSABLE (decl)) { any_addressable_vars = 1; break; } for (block = gimple_block (stmt); !any_addressable_vars && block && TREE_CODE (block) == BLOCK; block = BLOCK_SUPERCONTEXT (block)) { for (local_decls = BLOCK_VARS (block); local_decls; local_decls = DECL_CHAIN (local_decls)) if (TREE_ADDRESSABLE (local_decls)) { any_addressable_vars = 1; break; } if (block == gimple_block (parallel_stmt)) break; } } if (!any_addressable_vars) gimple_omp_return_set_nowait (stmt); } } } static void remove_exit_barriers (struct omp_region *region) { if (region->type == GIMPLE_OMP_PARALLEL) remove_exit_barrier (region); if (region->inner) { region = region->inner; remove_exit_barriers (region); while (region->next) { region = region->next; remove_exit_barriers (region); } } } /* Optimize omp_get_thread_num () and omp_get_num_threads () calls. These can't be declared as const functions, but within one parallel body they are constant, so they can be transformed there into __builtin_omp_get_{thread_num,num_threads} () which are declared const. Similarly for task body, except that in untied task omp_get_thread_num () can change at any task scheduling point. */ static void optimize_omp_library_calls (gimple *entry_stmt) { basic_block bb; gimple_stmt_iterator gsi; tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree); tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree); bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK && omp_find_clause (gimple_omp_task_clauses (entry_stmt), OMP_CLAUSE_UNTIED) != NULL); FOR_EACH_BB_FN (bb, cfun) for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *call = gsi_stmt (gsi); tree decl; if (is_gimple_call (call) && (decl = gimple_call_fndecl (call)) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl) && DECL_INITIAL (decl) == NULL) { tree built_in; if (DECL_NAME (decl) == thr_num_id) { /* In #pragma omp task untied omp_get_thread_num () can change during the execution of the task region. */ if (untied_task) continue; built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); } else if (DECL_NAME (decl) == num_thr_id) built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); else continue; if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in) || gimple_call_num_args (call) != 0) continue; if (flag_exceptions && !TREE_NOTHROW (decl)) continue; if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)), TREE_TYPE (TREE_TYPE (built_in)))) continue; gimple_call_set_fndecl (call, built_in); } } } /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be regimplified. */ static tree expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *) { tree t = *tp; /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */ if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t)) return t; if (TREE_CODE (t) == ADDR_EXPR) recompute_tree_invariant_for_addr_expr (t); *walk_subtrees = !TYPE_P (t) && !DECL_P (t); return NULL_TREE; } /* Prepend or append TO = FROM assignment before or after *GSI_P. 
*/

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
			 bool after)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   !after, after ? GSI_CONTINUE_LINKING
						 : GSI_SAME_STMT);
  gimple *stmt = gimple_build_assign (to, from);
  if (after)
    gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
  else
    gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple *entry_stmt, *stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
    exit_bb = region->cont;
  else
    exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_nondebug_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable.  In which case, we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb
	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
				       : FALLTHRU_EDGE (entry_bb)->dest;
	  tree arg;
	  gimple *parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple *stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??
		     */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      tree lhs = gimple_assign_lhs (parcopy_stmt);
	      gcc_assert (SSA_NAME_VAR (lhs) == arg);
	      /* We'd like to set the rhs to the default def in the child_fn,
		 but it's too early to create ssa names in the child_fn.
		 Instead, we set the rhs to the parm.  In
		 move_sese_region_to_fn, we introduce a default def for the
		 parm, map the parm to its default def, and once we encounter
		 this stmt, replace the parm with the default def.  */
	      gimple_assign_set_rhs1 (parcopy_stmt, arg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_nondebug_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      edge e2 = NULL;
      if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      else
	{
	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
	  gcc_assert (e2->dest == region->exit);
	  remove_edge (BRANCH_EDGE (entry_bb));
	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
	  gsi = gsi_last_nondebug_bb (region->exit);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  gsi_remove (&gsi, true);
	}

      /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_nondebug_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && (gimple_code (gsi_stmt (gsi))
			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      /* Make sure to generate early debug for the function before
	 outlining anything.  */
      if (! gimple_in_ssa_p (cfun))
	(*debug_hooks->early_global_decl) (cfun->decl);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      if (e2)
	{
	  basic_block dest_bb = e2->dest;
	  if (!exit_bb)
	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
	  remove_edge (e2);
	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
	}
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree, arrange for the child function to fixup loops.
*/ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ num = vec_safe_length (child_cfun->local_decls); for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) { t = (*child_cfun->local_decls)[srcidx]; if (DECL_CONTEXT (t) == cfun->decl) continue; if (srcidx != dstidx) (*child_cfun->local_decls)[dstidx] = t; dstidx++; } if (dstidx != num) vec_safe_truncate (child_cfun->local_decls, dstidx); /* Inform the callgraph about the new function. */ child_cfun->curr_properties = cfun->curr_properties; child_cfun->has_simduid_loops |= cfun->has_simduid_loops; child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; cgraph_node *node = cgraph_node::get_create (child_fn); node->parallelized_function = 1; cgraph_node::add_new_function (child_fn, true); bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) && !DECL_ASSEMBLER_NAME_SET_P (child_fn); /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ push_cfun (child_cfun); if (need_asm) assign_assembler_name_if_needed (child_fn); if (optimize) optimize_omp_library_calls (entry_stmt); update_max_bb_count (); cgraph_edge::rebuild_edges (); /* Some EH regions might become dead, see PR34608. If pass_cleanup_cfg isn't the first pass to happen with the new child, these dead EH edges might cause problems. Clean them up now. */ if (flag_exceptions) { basic_block bb; bool changed = false; FOR_EACH_BB_FN (bb, cfun) changed |= gimple_purge_dead_eh_edges (bb); if (changed) cleanup_tree_cfg (); } if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa); if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); pop_cfun (); if (dump_file && !gimple_in_ssa_p (cfun)) { omp_any_child_fn_dumped = true; dump_function_header (dump_file, child_fn, dump_flags); dump_function_to_file (child_fn, dump_file, dump_flags); } } if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL) expand_parallel_call (region, new_bb, as_a <gomp_parallel *> (entry_stmt), ws_args); else expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt)); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_only_virtuals); } /* Information about members of an OpenACC collapsed loop nest. */ struct oacc_collapse { tree base; /* Base value. */ tree iters; /* Number of steps. */ tree step; /* Step size. */ tree tile; /* Tile increment (if tiled). */ tree outer; /* Tile iterator var. */ }; /* Helper for expand_oacc_for. Determine collapsed loop information. Fill in COUNTS array. Emit any initialization code before GSI. Return the calculated outer loop bound of BOUND_TYPE. */ static tree expand_oacc_collapse_init (const struct omp_for_data *fd, gimple_stmt_iterator *gsi, oacc_collapse *counts, tree bound_type, location_t loc) { tree tiling = fd->tiling; tree total = build_int_cst (bound_type, 1); int ix; gcc_assert (integer_onep (fd->loop.step)); gcc_assert (integer_zerop (fd->loop.n1)); /* When tiling, the first operand of the tile clause applies to the innermost loop, and we work outwards from there. Seems backwards, but whatever. 
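   E.g. a 'tile (2, 4)' clause on two collapsed loops tiles the
   innermost loop by 2 and the loop around it by 4.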
*/ for (ix = fd->collapse; ix--;) { const omp_for_data_loop *loop = &fd->loops[ix]; tree iter_type = TREE_TYPE (loop->v); tree diff_type = iter_type; tree plus_type = iter_type; gcc_assert (loop->cond_code == fd->loop.cond_code); if (POINTER_TYPE_P (iter_type)) plus_type = sizetype; if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type)) diff_type = signed_type_for (diff_type); if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node)) diff_type = integer_type_node; if (tiling) { tree num = build_int_cst (integer_type_node, fd->collapse); tree loop_no = build_int_cst (integer_type_node, ix); tree tile = TREE_VALUE (tiling); gcall *call = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile, /* gwv-outer=*/integer_zero_node, /* gwv-inner=*/integer_zero_node); counts[ix].outer = create_tmp_var (iter_type, ".outer"); counts[ix].tile = create_tmp_var (diff_type, ".tile"); gimple_call_set_lhs (call, counts[ix].tile); gimple_set_location (call, loc); gsi_insert_before (gsi, call, GSI_SAME_STMT); tiling = TREE_CHAIN (tiling); } else { counts[ix].tile = NULL; counts[ix].outer = loop->v; } tree b = loop->n1; tree e = loop->n2; tree s = loop->step; bool up = loop->cond_code == LT_EXPR; tree dir = build_int_cst (diff_type, up ? +1 : -1); bool negating; tree expr; b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE, true, GSI_SAME_STMT); e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE, true, GSI_SAME_STMT); /* Convert the step, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); if (negating) s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); s = fold_convert (diff_type, s); if (negating) s = fold_build1 (NEGATE_EXPR, diff_type, s); s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE, true, GSI_SAME_STMT); /* Determine the range, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), fold_convert (plus_type, negating ? e : b)); expr = fold_convert (diff_type, expr); if (negating) expr = fold_build1 (NEGATE_EXPR, diff_type, expr); tree range = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); /* Determine number of iterations. */ expr = fold_build2 (MINUS_EXPR, diff_type, range, dir); expr = fold_build2 (PLUS_EXPR, diff_type, expr, s); expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s); tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); counts[ix].base = b; counts[ix].iters = iters; counts[ix].step = s; total = fold_build2 (MULT_EXPR, bound_type, total, fold_convert (bound_type, iters)); } return total; } /* Emit initializers for collapsed loop members. INNER is true if this is for the element loop of a TILE. IVAR is the outer loop iteration variable, from which collapsed loop iteration values are calculated. COUNTS array has been initialized by expand_oacc_collapse_inits. */ static void expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner, gimple_stmt_iterator *gsi, const oacc_collapse *counts, tree ivar) { tree ivar_type = TREE_TYPE (ivar); /* The most rapidly changing iteration variable is the innermost one. */ for (int ix = fd->collapse; ix--;) { const omp_for_data_loop *loop = &fd->loops[ix]; const oacc_collapse *collapse = &counts[ix]; tree v = inner ? 
loop->v : collapse->outer; tree iter_type = TREE_TYPE (v); tree diff_type = TREE_TYPE (collapse->step); tree plus_type = iter_type; enum tree_code plus_code = PLUS_EXPR; tree expr; if (POINTER_TYPE_P (iter_type)) { plus_code = POINTER_PLUS_EXPR; plus_type = sizetype; } expr = ivar; if (ix) { tree mod = fold_convert (ivar_type, collapse->iters); ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod); expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod); ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE, true, GSI_SAME_STMT); } expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr), collapse->step); expr = fold_build2 (plus_code, iter_type, inner ? collapse->outer : collapse->base, fold_convert (plus_type, expr)); expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); gassign *ass = gimple_build_assign (v, expr); gsi_insert_before (gsi, ass, GSI_SAME_STMT); } } /* Helper function for expand_omp_{for_*,simd}. If this is the outermost of the combined collapse > 1 loop constructs, generate code like: if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB; if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; Furthermore, if ZERO_ITER_BB is NULL, create a BB which does: count = 0; and set ZERO_ITER_BB to that bb. If this isn't the outermost of the combined loop constructs, just initialize COUNTS array from the _looptemp_ clauses. */ /* NOTE: It *could* be better to moosh all of the BBs together, creating one larger BB with all the computation and the unexpected jump at the end. I.e. bool zero3, zero2, zero1, zero; zero3 = N32 c3 N31; count3 = (N32 - N31) /[cl] STEP3; zero2 = N22 c2 N21; count2 = (N22 - N21) /[cl] STEP2; zero1 = N12 c1 N11; count1 = (N12 - N11) /[cl] STEP1; zero = zero3 || zero2 || zero1; count = count1 * count2 * count3; if (__builtin_expect(zero, false)) goto zero_iter_bb; After all, we expect the zero=false, and thus we expect to have to evaluate all of the comparison expressions, so short-circuiting oughtn't be a win. Since the condition isn't protecting a denominator, we're not concerned about divide-by-zero, so we can fully evaluate count even if a numerator turned out to be wrong. It seems like putting this all together would create much better scheduling opportunities, and less pressure on the chip's branch predictor. */ static void expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi, basic_block &entry_bb, tree *counts, basic_block &zero_iter1_bb, int &first_zero_iter1, basic_block &zero_iter2_bb, int &first_zero_iter2, basic_block &l2_dom_bb) { tree t, type = TREE_TYPE (fd->loop.v); edge e, ne; int i; /* Collapsed loops need work for expansion into SSA form. */ gcc_assert (!gimple_in_ssa_p (cfun)); if (gimple_omp_for_combined_into_p (fd->for_stmt) && TREE_CODE (fd->loop.n2) != INTEGER_CST) { gcc_assert (fd->ordered == 0); /* First two _looptemp_ clauses are for istart/iend, counts[0] isn't supposed to be handled, as the inner loop doesn't use it. 
*/ tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); for (i = 0; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); if (i) counts[i] = OMP_CLAUSE_DECL (innerc); else counts[0] = NULL_TREE; } return; } for (i = fd->collapse; i < fd->ordered; i++) { tree itype = TREE_TYPE (fd->loops[i].v); counts[i] = NULL_TREE; t = fold_binary (fd->loops[i].cond_code, boolean_type_node, fold_convert (itype, fd->loops[i].n1), fold_convert (itype, fd->loops[i].n2)); if (t && integer_zerop (t)) { for (i = fd->collapse; i < fd->ordered; i++) counts[i] = build_int_cst (type, 0); break; } } for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++) { tree itype = TREE_TYPE (fd->loops[i].v); if (i >= fd->collapse && counts[i]) continue; if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse) && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node, fold_convert (itype, fd->loops[i].n1), fold_convert (itype, fd->loops[i].n2))) == NULL_TREE || !integer_onep (t))) { gcond *cond_stmt; tree n1, n2; n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1)); n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2)); n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { *gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, gsi); } e = split_block (entry_bb, cond_stmt); basic_block &zero_iter_bb = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb; int &first_zero_iter = i < fd->collapse ? first_zero_iter1 : first_zero_iter2; if (zero_iter_bb == NULL) { gassign *assign_stmt; first_zero_iter = i; zero_iter_bb = create_empty_bb (entry_bb); add_bb_to_loop (zero_iter_bb, entry_bb->loop_father); *gsi = gsi_after_labels (zero_iter_bb); if (i < fd->collapse) assign_stmt = gimple_build_assign (fd->loop.n2, build_zero_cst (type)); else { counts[i] = create_tmp_reg (type, ".count"); assign_stmt = gimple_build_assign (counts[i], build_zero_cst (type)); } gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT); set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb, entry_bb); } ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE); ne->probability = profile_probability::very_unlikely (); e->flags = EDGE_TRUE_VALUE; e->probability = ne->probability.invert (); if (l2_dom_bb == NULL) l2_dom_bb = entry_bb; entry_bb = e->dest; *gsi = gsi_last_nondebug_bb (entry_bb); } if (POINTER_TYPE_P (itype)) itype = signed_type_for (itype); t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, fold_convert (itype, fd->loops[i].step), t); t = fold_build2 (PLUS_EXPR, itype, t, fold_convert (itype, fd->loops[i].n2)); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loops[i].n1)); /* ?? We could probably use CEIL_DIV_EXPR instead of TRUNC_DIV_EXPR and adjusting by hand. Unless we can't generate the same code in the end because generically we don't know that the values involved must be negative for GT?? 
*/ if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, fold_convert (itype, fd->loops[i].step))); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); t = fold_convert (type, t); if (TREE_CODE (t) == INTEGER_CST) counts[i] = t; else { if (i < fd->collapse || i != first_zero_iter2) counts[i] = create_tmp_reg (type, ".count"); expand_omp_build_assign (gsi, counts[i], t); } if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse) { if (i == 0) t = counts[0]; else t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]); expand_omp_build_assign (gsi, fd->loop.n2, t); } } } /* Helper function for expand_omp_{for_*,simd}. Generate code like: T = V; V3 = N31 + (T % count3) * STEP3; T = T / count3; V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1; if this loop doesn't have an inner loop construct combined with it. If it does have an inner loop construct combined with it and the iteration count isn't known constant, store values from counts array into its _looptemp_ temporaries instead. */ static void expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi, tree *counts, gimple *inner_stmt, tree startvar) { int i; if (gimple_omp_for_combined_p (fd->for_stmt)) { /* If fd->loop.n2 is constant, then no propagation of the counts is needed, they are constant. */ if (TREE_CODE (fd->loop.n2) == INTEGER_CST) return; tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR ? gimple_omp_taskreg_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); /* First two _looptemp_ clauses are for istart/iend, counts[0] isn't supposed to be handled, as the inner loop doesn't use it. */ tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); for (i = 0; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); if (i) { tree tem = OMP_CLAUSE_DECL (innerc); tree t = fold_convert (TREE_TYPE (tem), counts[i]); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); gassign *stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } return; } tree type = TREE_TYPE (fd->loop.v); tree tem = create_tmp_reg (type, ".tem"); gassign *stmt = gimple_build_assign (tem, startvar); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); for (i = fd->collapse - 1; i >= 0; i--) { tree vtype = TREE_TYPE (fd->loops[i].v), itype, t; itype = vtype; if (POINTER_TYPE_P (vtype)) itype = signed_type_for (vtype); if (i != 0) t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]); else t = tem; t = fold_convert (itype, t); t = fold_build2 (MULT_EXPR, itype, t, fold_convert (itype, fd->loops[i].step)); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[i].n1, t); else t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t); t = force_gimple_operand_gsi (gsi, t, DECL_P (fd->loops[i].v) && TREE_ADDRESSABLE (fd->loops[i].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i].v, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); if (i != 0) { t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]); t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (tem, t); gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING); } } } /* Helper function for expand_omp_for_*. 
Generate code like: L10: V3 += STEP3; if (V3 cond3 N32) goto BODY_BB; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto BODY_BB; else goto L12; L12: V2 = N21; V1 += STEP1; goto BODY_BB; */ static basic_block extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb, basic_block body_bb) { basic_block last_bb, bb, collapse_bb = NULL; int i; gimple_stmt_iterator gsi; edge e; tree t; gimple *stmt; last_bb = cont_bb; for (i = fd->collapse - 1; i >= 0; i--) { tree vtype = TREE_TYPE (fd->loops[i].v); bb = create_empty_bb (last_bb); add_bb_to_loop (bb, last_bb->loop_father); gsi = gsi_start_bb (bb); if (i < fd->collapse - 1) { e = make_edge (last_bb, bb, EDGE_FALSE_VALUE); e->probability = profile_probability::guessed_always ().apply_scale (1, 8); t = fd->loops[i + 1].n1; t = force_gimple_operand_gsi (&gsi, t, DECL_P (fd->loops[i + 1].v) && TREE_ADDRESSABLE (fd->loops[i + 1].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i + 1].v, t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); } else collapse_bb = bb; set_immediate_dominator (CDI_DOMINATORS, bb, last_bb); if (POINTER_TYPE_P (vtype)) t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step); else t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (fd->loops[i].v) && TREE_ADDRESSABLE (fd->loops[i].v), NULL_TREE, false, GSI_CONTINUE_LINKING); stmt = gimple_build_assign (fd->loops[i].v, t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); if (i > 0) { t = fd->loops[i].n2; t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree v = fd->loops[i].v; if (DECL_P (v) && TREE_ADDRESSABLE (v)) v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t); stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); e = make_edge (bb, body_bb, EDGE_TRUE_VALUE); e->probability = profile_probability::guessed_always ().apply_scale (7, 8); } else make_edge (bb, body_bb, EDGE_FALLTHRU); last_bb = bb; } return collapse_bb; } /* Expand #pragma omp ordered depend(source). */ static void expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd, tree *counts, location_t loc) { enum built_in_function source_ix = fd->iter_type == long_integer_type_node ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST; gimple *g = gimple_build_call (builtin_decl_explicit (source_ix), 1, build_fold_addr_expr (counts[fd->ordered])); gimple_set_location (g, loc); gsi_insert_before (gsi, g, GSI_SAME_STMT); } /* Expand a single depend from #pragma omp ordered depend(sink:...). */ static void expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd, tree *counts, tree c, location_t loc) { auto_vec<tree, 10> args; enum built_in_function sink_ix = fd->iter_type == long_integer_type_node ? 
BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT; tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE; int i; gimple_stmt_iterator gsi2 = *gsi; bool warned_step = false; for (i = 0; i < fd->ordered; i++) { tree step = NULL_TREE; off = TREE_PURPOSE (deps); if (TREE_CODE (off) == TRUNC_DIV_EXPR) { step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); } if (!integer_zerop (off)) { gcc_assert (fd->loops[i].cond_code == LT_EXPR || fd->loops[i].cond_code == GT_EXPR); bool forward = fd->loops[i].cond_code == LT_EXPR; if (step) { /* Non-simple Fortran DO loops. If step is variable, we don't even know the direction at compile time, so we can't warn. */ if (TREE_CODE (step) != INTEGER_CST) break; forward = tree_int_cst_sgn (step) != -1; } if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) warning_at (loc, 0, "%<depend(sink)%> clause waiting for " "lexically later iteration"); break; } deps = TREE_CHAIN (deps); } /* If all offsets corresponding to the collapsed loops are zero, this depend clause can be ignored. FIXME: but there is still a flush needed. We need to emit one __sync_synchronize () for it though (perhaps conditionally)? Solve this together with the conservative dependence folding optimization. if (i >= fd->collapse) return; */ deps = OMP_CLAUSE_DECL (c); gsi_prev (&gsi2); edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2)); edge e2 = split_block_after_labels (e1->dest); gsi2 = gsi_after_labels (e1->dest); *gsi = gsi_last_bb (e1->src); for (i = 0; i < fd->ordered; i++) { tree itype = TREE_TYPE (fd->loops[i].v); tree step = NULL_TREE; tree orig_off = NULL_TREE; if (POINTER_TYPE_P (itype)) itype = sizetype; if (i) deps = TREE_CHAIN (deps); off = TREE_PURPOSE (deps); if (TREE_CODE (off) == TRUNC_DIV_EXPR) { step = TREE_OPERAND (off, 1); off = TREE_OPERAND (off, 0); gcc_assert (fd->loops[i].cond_code == LT_EXPR && integer_onep (fd->loops[i].step) && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))); } tree s = fold_convert_loc (loc, itype, step ?
step : fd->loops[i].step); if (step) { off = fold_convert_loc (loc, itype, off); orig_off = off; off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); } if (integer_zerop (off)) t = boolean_true_node; else { tree a; tree co = fold_convert_loc (loc, itype, off); if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))) { if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) co = fold_build1_loc (loc, NEGATE_EXPR, itype, co); a = fold_build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); } else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); else a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, co); if (step) { tree t1, t2; if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); else t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); else t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, step, build_int_cst (TREE_TYPE (step), 0)); if (TREE_CODE (step) != INTEGER_CST) { t1 = unshare_expr (t1); t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t2 = unshare_expr (t2); t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } t = fold_build3_loc (loc, COND_EXPR, boolean_type_node, t, t2, t1); } else if (fd->loops[i].cond_code == LT_EXPR) { if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a, fd->loops[i].n1); else t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a, fd->loops[i].n2); } else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a, fd->loops[i].n2); else t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a, fd->loops[i].n1); } if (cond) cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t); else cond = t; off = fold_convert_loc (loc, itype, off); if (step || (fd->loops[i].cond_code == LT_EXPR ? !integer_onep (fd->loops[i].step) : !integer_minus_onep (fd->loops[i].step))) { if (step == NULL_TREE && TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off, fold_build1_loc (loc, NEGATE_EXPR, itype, s)); else t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, orig_off ? orig_off : off, s); t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t, build_int_cst (itype, 0)); if (integer_zerop (t) && !warned_step) { warning_at (loc, 0, "%<depend(sink)%> refers to iteration never " "in the iteration space"); warned_step = true; } cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t); } if (i <= fd->collapse - 1 && fd->collapse > 1) t = fd->loop.v; else if (counts[i]) t = counts[i]; else { t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, fd->loops[i].n1); t = fold_convert_loc (loc, fd->iter_type, t); } if (step) /* We have divided off by step already earlier. 
*/; else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR) off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, fold_build1_loc (loc, NEGATE_EXPR, itype, s)); else off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s); if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps)) off = fold_build1_loc (loc, NEGATE_EXPR, itype, off); off = fold_convert_loc (loc, fd->iter_type, off); if (i <= fd->collapse - 1 && fd->collapse > 1) { if (i) off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff, off); if (i < fd->collapse - 1) { coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off, counts[i]); continue; } } off = unshare_expr (off); t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off); t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE, true, GSI_SAME_STMT); args.safe_push (t); } gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args); gimple_set_location (g, loc); gsi_insert_before (&gsi2, g, GSI_SAME_STMT); cond = unshare_expr (cond); cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false, GSI_CONTINUE_LINKING); gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT); edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE); e3->probability = profile_probability::guessed_always ().apply_scale (1, 8); e1->probability = e3->probability.invert (); e1->flags = EDGE_TRUE_VALUE; set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src); *gsi = gsi_after_labels (e2->dest); } /* Expand all #pragma omp ordered depend(source) and #pragma omp ordered depend(sink:...) constructs in the current #pragma omp for ordered(n) region. */ static void expand_omp_ordered_source_sink (struct omp_region *region, struct omp_for_data *fd, tree *counts, basic_block cont_bb) { struct omp_region *inner; int i; for (i = fd->collapse - 1; i < fd->ordered; i++) if (i == fd->collapse - 1 && fd->collapse > 1) counts[i] = NULL_TREE; else if (i >= fd->collapse && !cont_bb) counts[i] = build_zero_cst (fd->iter_type); else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)) && integer_onep (fd->loops[i].step)) counts[i] = NULL_TREE; else counts[i] = create_tmp_var (fd->iter_type, ".orditer"); tree atype = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1); counts[fd->ordered] = create_tmp_var (atype, ".orditera"); TREE_ADDRESSABLE (counts[fd->ordered]) = 1; for (inner = region->inner; inner; inner = inner->next) if (inner->type == GIMPLE_OMP_ORDERED) { gomp_ordered *ord_stmt = inner->ord_stmt; gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt); location_t loc = gimple_location (ord_stmt); tree c; for (c = gimple_omp_ordered_clauses (ord_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE) break; if (c) expand_omp_ordered_source (&gsi, fd, counts, loc); for (c = gimple_omp_ordered_clauses (ord_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) expand_omp_ordered_sink (&gsi, fd, counts, c, loc); gsi_remove (&gsi, true); } } /* Wrap the body into fd->ordered - fd->collapse loops that aren't collapsed. 
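E.g. (an illustration, not a requirement of the interface): with ordered(2) and no collapse clause, fd->collapse == 1 and the single non-collapsed loop is rebuilt below as an explicit header/latch pair around BODY_BB, publishing its iteration number into the .orditera array used by depend(source)/depend(sink).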
*/ static basic_block expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts, basic_block cont_bb, basic_block body_bb, bool ordered_lastprivate) { if (fd->ordered == fd->collapse) return cont_bb; if (!cont_bb) { gimple_stmt_iterator gsi = gsi_after_labels (body_bb); for (int i = fd->collapse; i < fd->ordered; i++) { tree type = TREE_TYPE (fd->loops[i].v); tree n1 = fold_convert (type, fd->loops[i].n1); expand_omp_build_assign (&gsi, fd->loops[i].v, n1); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); } return NULL; } for (int i = fd->ordered - 1; i >= fd->collapse; i--) { tree t, type = TREE_TYPE (fd->loops[i].v); gimple_stmt_iterator gsi = gsi_after_labels (body_bb); expand_omp_build_assign (&gsi, fd->loops[i].v, fold_convert (type, fd->loops[i].n1)); if (counts[i]) expand_omp_build_assign (&gsi, counts[i], build_zero_cst (fd->iter_type)); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type)); if (!gsi_end_p (gsi)) gsi_prev (&gsi); else gsi = gsi_last_bb (body_bb); edge e1 = split_block (body_bb, gsi_stmt (gsi)); basic_block new_body = e1->dest; if (body_bb == cont_bb) cont_bb = new_body; edge e2 = NULL; basic_block new_header; if (EDGE_COUNT (cont_bb->preds) > 0) { gsi = gsi_last_bb (cont_bb); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loops[i].v, fold_convert (sizetype, fd->loops[i].step)); else t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v, fold_convert (type, fd->loops[i].step)); expand_omp_build_assign (&gsi, fd->loops[i].v, t); if (counts[i]) { t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i], build_int_cst (fd->iter_type, 1)); expand_omp_build_assign (&gsi, counts[i], t); t = counts[i]; } else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, fd->loops[i].n1); t = fold_convert (fd->iter_type, t); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); } aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_int (i - fd->collapse + 1), NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, t); gsi_prev (&gsi); e2 = split_block (cont_bb, gsi_stmt (gsi)); new_header = e2->dest; } else new_header = cont_bb; gsi = gsi_after_labels (new_header); tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE, true, GSI_SAME_STMT); tree n2 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2), true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT); edge e3 = split_block (new_header, gsi_stmt (gsi)); cont_bb = e3->dest; remove_edge (e1); make_edge (body_bb, new_header, EDGE_FALLTHRU); e3->flags = EDGE_FALSE_VALUE; e3->probability = profile_probability::guessed_always ().apply_scale (1, 8); e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE); e1->probability = e3->probability.invert (); set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb); set_immediate_dominator (CDI_DOMINATORS, new_body, new_header); if (e2) { struct loop *loop = alloc_loop (); loop->header = new_header; loop->latch = e2->src; add_loop (loop, body_bb->loop_father); } } /* If there are any lastprivate clauses and it is possible some loops might have zero iterations, ensure all the decls are 
initialized, otherwise we could crash evaluating C++ class iterators with lastprivate clauses. */ bool need_inits = false; for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++) if (need_inits) { tree type = TREE_TYPE (fd->loops[i].v); gimple_stmt_iterator gsi = gsi_after_labels (body_bb); expand_omp_build_assign (&gsi, fd->loops[i].v, fold_convert (type, fd->loops[i].n1)); } else { tree type = TREE_TYPE (fd->loops[i].v); tree this_cond = fold_build2 (fd->loops[i].cond_code, boolean_type_node, fold_convert (type, fd->loops[i].n1), fold_convert (type, fd->loops[i].n2)); if (!integer_onep (this_cond)) need_inits = true; } return cont_bb; } /* Return phi in E->DEST with ARG on edge E. */ static gphi * find_phi_with_arg_on_edge (tree arg, edge e) { basic_block bb = e->dest; for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); if (PHI_ARG_DEF_FROM_EDGE (phi, e) == arg) return phi; } return NULL; } /* A subroutine of expand_omp_for. Generate code for a parallel loop with any schedule. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; iend = iend0; L1: BODY; V += STEP; if (V cond iend) goto L1; else goto L2; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: If this is a combined omp parallel loop, instead of the call to GOMP_loop_foo_start, we call GOMP_loop_foo_next. If this is gimple_omp_for_combined_p loop, then instead of assigning V and iend in L0 we assign the first two _looptemp_ clause decls of the inner GIMPLE_OMP_FOR and V += STEP; and if (V cond iend) goto L1; else goto L2; are removed. For collapsed loops, given parameters: collapse(3) for (V1 = N11; V1 cond1 N12; V1 += STEP1) for (V2 = N21; V2 cond2 N22; V2 += STEP2) for (V3 = N31; V3 cond3 N32; V3 += STEP3) BODY; we generate pseudocode if (__builtin_expect (N32 cond3 N31, 0)) goto Z0; if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (__builtin_expect (N22 cond2 N21, 0)) goto Z0; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (__builtin_expect (N12 cond1 N11, 0)) goto Z0; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; goto Z1; Z0: count = 0; Z1: more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0); if (more) goto L0; else goto L3; L0: V = istart0; T = V; V3 = N31 + (T % count3) * STEP3; T = T / count3; V2 = N21 + (T % count2) * STEP2; T = T / count2; V1 = N11 + T * STEP1; iend = iend0; L1: BODY; V += 1; if (V < iend) goto L10; else goto L2; L10: V3 += STEP3; if (V3 cond3 N32) goto L1; else goto L11; L11: V3 = N31; V2 += STEP2; if (V2 cond2 N22) goto L1; else goto L12; L12: V2 = N21; V1 += STEP1; goto L1; L2: if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3; L3: */ static void expand_omp_for_generic (struct omp_region *region, struct omp_for_data *fd, enum built_in_function start_fn, enum built_in_function next_fn, gimple *inner_stmt) { tree type, istart0, iend0, iend; tree t, vmain, vback, bias = NULL_TREE; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb; basic_block l2_bb = NULL, l3_bb = NULL; gimple_stmt_iterator gsi; gassign *assign_stmt; bool in_combined_parallel = is_combined_parallel (region); bool broken_loop = region->cont == NULL; edge e, ne; tree *counts = NULL; int i; bool ordered_lastprivate = false; gcc_assert (!broken_loop || !in_combined_parallel); gcc_assert (fd->iter_type == long_integer_type_node || !in_combined_parallel); entry_bb = region->entry; cont_bb = region->cont; collapse_bb = NULL; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
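/* ENTRY_BB ends in the GIMPLE_OMP_FOR and has exactly two successors: the fallthru edge, from which L0_BB (the iteration setup block) is split below, and the branch edge leading to L3_BB, the code after the loop.  */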
gcc_assert (broken_loop || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); l0_bb = split_edge (FALLTHRU_EDGE (entry_bb)); l1_bb = single_succ (l0_bb); if (!broken_loop) { l2_bb = create_empty_bb (cont_bb); gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest == l1_bb)); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } else l2_bb = NULL; l3_bb = BRANCH_EDGE (entry_bb)->dest; exit_bb = region->exit; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->ordered && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi)), OMP_CLAUSE_LASTPRIVATE)) ordered_lastprivate = true; if (fd->collapse > 1 || fd->ordered) { int first_zero_iter1 = -1, first_zero_iter2 = -1; basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter1_bb, first_zero_iter1, zero_iter2_bb, first_zero_iter2, l2_dom_bb); if (zero_iter1_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter1; i < (fd->ordered ? fd->ordered : fd->collapse); i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter1_bb)); } if (zero_iter2_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter2; i < fd->ordered; i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; if (zero_iter1_bb) make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); else { gsi_prev (&gsi); e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_nondebug_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter2_bb)); } } if (fd->collapse == 1) { counts[0] = fd->loop.n2; fd->loop = fd->loops[0]; } } type = TREE_TYPE (fd->loop.v); istart0 = create_tmp_var (fd->iter_type, ".istart0"); iend0 = create_tmp_var (fd->iter_type, ".iend0"); TREE_ADDRESSABLE (istart0) = 1; TREE_ADDRESSABLE (iend0) = 1; /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type) && fd->ordered == 0) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } gimple_stmt_iterator gsif = gsi; gsi_prev (&gsif); tree arr = NULL_TREE; if (in_combined_parallel) { gcc_assert (fd->ordered == 0); /* In a combined parallel loop, emit a call to GOMP_loop_foo_next.
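The work-sharing loop was already started by the runtime call that launched the combined parallel construct, so only the first chunk is fetched here; e.g. for schedule(dynamic) NEXT_FN resolves to GOMP_loop_dynamic_next (the name is given only as an illustration).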
*/ t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); } else { tree t0, t1, t2, t3, t4; /* If this is not a combined parallel loop, emit a call to GOMP_loop_foo_start in ENTRY_BB. */ t4 = build_fold_addr_expr (iend0); t3 = build_fold_addr_expr (istart0); if (fd->ordered) { t0 = build_int_cst (unsigned_type_node, fd->ordered - fd->collapse + 1); arr = create_tmp_var (build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1), ".omp_counts"); DECL_NAMELESS (arr) = 1; TREE_ADDRESSABLE (arr) = 1; TREE_STATIC (arr) = 1; vec<constructor_elt, va_gc> *v; vec_alloc (v, fd->ordered - fd->collapse + 1); int idx; for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++) { tree c; if (idx == 0 && fd->collapse > 1) c = fd->loop.n2; else c = counts[idx + fd->collapse - 1]; tree purpose = size_int (idx); CONSTRUCTOR_APPEND_ELT (v, purpose, c); if (TREE_CODE (c) != INTEGER_CST) TREE_STATIC (arr) = 0; } DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v); if (!TREE_STATIC (arr)) force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR, void_type_node, arr), true, NULL_TREE, true, GSI_SAME_STMT); t1 = build_fold_addr_expr (arr); t2 = NULL_TREE; } else { t2 = fold_convert (fd->iter_type, fd->loop.step); t1 = fd->loop.n2; t0 = fd->loop.n1; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t0 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); t1 = OMP_CLAUSE_DECL (innerc); } if (POINTER_TYPE_P (TREE_TYPE (t0)) && TYPE_PRECISION (TREE_TYPE (t0)) != TYPE_PRECISION (fd->iter_type)) { /* Avoid casting pointers to integer of a different size. */ tree itype = signed_type_for (type); t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); } else { t1 = fold_convert (fd->iter_type, t1); t0 = fold_convert (fd->iter_type, t0); } if (bias) { t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); } } if (fd->iter_type == long_integer_type_node || fd->ordered) { if (fd->chunk_size) { t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t0, t1, t2, t, t3, t4); } else if (fd->ordered) t = build_call_expr (builtin_decl_explicit (start_fn), 4, t0, t1, t3, t4); else t = build_call_expr (builtin_decl_explicit (start_fn), 5, t0, t1, t2, t3, t4); } else { tree t5; tree c_bool_type; tree bfn_decl; /* The GOMP_loop_ull_*start functions have additional boolean argument, true for < loops and false for > loops. In Fortran, the C bool type can be different from boolean_type_node. */ bfn_decl = builtin_decl_explicit (start_fn); c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl)); t5 = build_int_cst (c_bool_type, fd->loop.cond_code == LT_EXPR ? 
1 : 0); if (fd->chunk_size) { tree bfn_decl = builtin_decl_explicit (start_fn); t = fold_convert (fd->iter_type, fd->chunk_size); t = omp_adjust_chunk_size (t, fd->simd_schedule); t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4); } else t = build_call_expr (builtin_decl_explicit (start_fn), 6, t5, t0, t1, t2, t3, t4); } } if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); if (arr && !TREE_STATIC (arr)) { tree clobber = build_constructor (TREE_TYPE (arr), NULL); TREE_THIS_VOLATILE (clobber) = 1; gsi_insert_before (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); if (gsi_end_p (gsif)) gsif = gsi_after_labels (gsi_bb (gsif)); gsi_next (&gsif); /* Iteration setup for sequential loop goes in L0_BB. */ tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR && gimple_omp_for_kind (inner_stmt) == GF_OMP_FOR_KIND_SIMD); tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } gsi = gsi_start_bb (l0_bb); t = istart0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = iend0; if (fd->ordered && fd->collapse == 1) t = fold_build2 (MULT_EXPR, fd->iter_type, t, fold_convert (fd->iter_type, fd->loop.step)); else if (bias) t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias); if (fd->ordered && fd->collapse == 1) { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, fold_convert (sizetype, t)); else { t = fold_convert (TREE_TYPE (startvar), t); t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar), fd->loop.n1, t); } } else { if (POINTER_TYPE_P (TREE_TYPE (startvar))) t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t); t = fold_convert (TREE_TYPE (startvar), t); } iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, iend); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend))) assign_stmt = gimple_build_assign (fd->loop.v, iend); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend); 
gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. */ tree itercnt = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { itercnt = startvar; tree n1 = fd->loop.n1; if (POINTER_TYPE_P (TREE_TYPE (itercnt))) { itercnt = fold_convert (signed_type_for (TREE_TYPE (itercnt)), itercnt); n1 = fold_convert (TREE_TYPE (itercnt), n1); } itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt), itercnt, n1); itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt), itercnt, fd->loop.step); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (fd->ordered) { /* Until now, counts array contained number of iterations or variable containing it for ith loop. From now on, we need those counts only for collapsed loops, and only for the 2nd till the last collapsed one. Move those one element earlier, we'll use counts[fd->collapse - 1] for the first source/sink iteration counter and so on and counts[fd->ordered] as the array holding the current counter values for depend(source). 
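A sketch with invented numbers: for collapse(3) ordered(5), counts[1] and counts[2] shift down to counts[0] and counts[1], and counts[2], i.e. counts[fd->collapse - 1], becomes the first source/sink iteration counter.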
*/ if (fd->collapse > 1) memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0])); if (broken_loop) { int i; for (i = fd->collapse; i < fd->ordered; i++) { tree type = TREE_TYPE (fd->loops[i].v); tree this_cond = fold_build2 (fd->loops[i].cond_code, boolean_type_node, fold_convert (type, fd->loops[i].n1), fold_convert (type, fd->loops[i].n2)); if (!integer_onep (this_cond)) break; } if (i < fd->ordered) { cont_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb); add_bb_to_loop (cont_bb, l1_bb->loop_father); gimple_stmt_iterator gsi = gsi_after_labels (cont_bb); gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v); gsi_insert_before (&gsi, g, GSI_SAME_STMT); make_edge (cont_bb, l3_bb, EDGE_FALLTHRU); make_edge (cont_bb, l1_bb, 0); l2_bb = create_empty_bb (cont_bb); broken_loop = false; } } expand_omp_ordered_source_sink (region, fd, counts, cont_bb); cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb, ordered_lastprivate); if (counts[fd->collapse - 1]) { gcc_assert (fd->collapse == 1); gsi = gsi_last_bb (l0_bb); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], istart0, true); gsi = gsi_last_bb (cont_bb); t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1], build_int_cst (fd->iter_type, 1)); expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]); t = counts[fd->collapse - 1]; } else if (fd->collapse > 1) t = fd->loop.v; else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); t = fold_convert (fd->iter_type, t); } gsi = gsi_last_bb (l0_bb); tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); expand_omp_build_assign (&gsi, aref, t, true); } if (!broken_loop) { /* Code to control the increment and predicate for the sequential loop goes in the CONT_BB. */ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, fd->loop.step); else t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE) { if (fd->collapse > 1) t = fd->loop.v; else { t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v), fd->loops[0].v, fd->loops[0].n1); t = fold_convert (fd->iter_type, t); } tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered], size_zero_node, NULL_TREE, NULL_TREE); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); expand_omp_build_assign (&gsi, aref, t); } t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, iend); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. 
*/ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb); /* Emit code to get the next parallel iteration in L2_BB. */ gsi = gsi_start_bb (l2_bb); t = build_call_expr (builtin_decl_explicit (next_fn), 2, build_fold_addr_expr (istart0), build_fold_addr_expr (iend0)); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (TREE_TYPE (t) != boolean_type_node) t = fold_build2 (NE_EXPR, boolean_type_node, t, build_int_cst (TREE_TYPE (t), 0)); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING); } /* Add the loop cleanup function. */ gsi = gsi_last_nondebug_bb (exit_bb); if (gimple_omp_return_nowait_p (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT); else if (gimple_omp_return_lhs (gsi_stmt (gsi))) t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL); else t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END); gcall *call_stmt = gimple_build_call (t, 0); if (gimple_omp_return_lhs (gsi_stmt (gsi))) gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi))); gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT); if (fd->ordered) { tree arr = counts[fd->ordered]; tree clobber = build_constructor (TREE_TYPE (arr), NULL); TREE_THIS_VOLATILE (clobber) = 1; gsi_insert_after (&gsi, gimple_build_assign (arr, clobber), GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect the new blocks. */ find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE; find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { gimple_seq phis; e = find_edge (cont_bb, l3_bb); ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE); phis = phi_nodes (l3_bb); for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *phi = gsi_stmt (gsi); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne), PHI_ARG_DEF_FROM_EDGE (phi, e)); } remove_edge (e); make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE); e = find_edge (cont_bb, l1_bb); if (e == NULL) { e = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (e->dest) == l1_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (e); e = NULL; } else if (fd->collapse > 1) { remove_edge (e); e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else e->flags = EDGE_TRUE_VALUE; if (e) { e->probability = profile_probability::guessed_always ().apply_scale (7, 8); find_edge (cont_bb, l2_bb)->probability = e->probability.invert (); } else { e = find_edge (cont_bb, l2_bb); e->flags = EDGE_FALLTHRU; } make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE); if (gimple_in_ssa_p (cfun)) { /* Add phis to the outer loop that connect to the phis in the inner, original loop, and move the loop entry value of the inner phi to the loop entry value of the outer phi. 
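Illustration with invented SSA names: given an inner phi x1 = PHI <x0 (l0), x2 (latch)>, a new phi x3 = PHI <x0 (entry), x_exit (l2)> is created in L0_BB, and x3 then feeds the loop entry argument of the inner phi.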
*/ gphi_iterator psi; for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi)) { source_location locus; gphi *nphi; gphi *exit_phi = psi.phi (); if (virtual_operand_p (gimple_phi_result (exit_phi))) continue; edge l2_to_l3 = find_edge (l2_bb, l3_bb); tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3); basic_block latch = BRANCH_EDGE (cont_bb)->dest; edge latch_to_l1 = find_edge (latch, l1_bb); gphi *inner_phi = find_phi_with_arg_on_edge (exit_res, latch_to_l1); tree t = gimple_phi_result (exit_phi); tree new_res = copy_ssa_name (t, NULL); nphi = create_phi_node (new_res, l0_bb); edge l0_to_l1 = find_edge (l0_bb, l1_bb); t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1); locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1); edge entry_to_l0 = find_edge (entry_bb, l0_bb); add_phi_arg (nphi, t, entry_to_l0, locus); edge l2_to_l0 = find_edge (l2_bb, l0_bb); add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION); add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION); } } set_immediate_dominator (CDI_DOMINATORS, l2_bb, recompute_dominator (CDI_DOMINATORS, l2_bb)); set_immediate_dominator (CDI_DOMINATORS, l3_bb, recompute_dominator (CDI_DOMINATORS, l3_bb)); set_immediate_dominator (CDI_DOMINATORS, l0_bb, recompute_dominator (CDI_DOMINATORS, l0_bb)); set_immediate_dominator (CDI_DOMINATORS, l1_bb, recompute_dominator (CDI_DOMINATORS, l1_bb)); /* We enter expand_omp_for_generic with a loop. This original loop may have its own loop struct, or it may be part of an outer loop struct (which may be the fake loop). */ struct loop *outer_loop = entry_bb->loop_father; bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop; add_bb_to_loop (l2_bb, outer_loop); /* We've added a new loop around the original loop. Allocate the corresponding loop struct. */ struct loop *new_loop = alloc_loop (); new_loop->header = l0_bb; new_loop->latch = l2_bb; add_loop (new_loop, outer_loop); /* Allocate a loop structure for the original loop unless we already had one. */ if (!orig_loop_has_loop_struct && !gimple_omp_for_combined_p (fd->for_stmt)) { struct loop *orig_loop = alloc_loop (); orig_loop->header = l1_bb; /* The loop may have multiple latches. */ add_loop (orig_loop, new_loop); } } } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and no specified chunk size. 
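Illustration with invented numbers: n = 10 iterations over nthreads = 4 give q = 2 and tt = 2, so threads 0 and 1 each run 3 iterations starting at s0 = 0 and 3, and threads 2 and 3 each run 2 starting at s0 = 6 and 8; every thread gets one contiguous slice and no runtime scheduling call is needed.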
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; q = n / nthreads; tt = n % nthreads; if (threadid < tt) goto L3; else goto L4; L3: tt = 0; q = q + 1; L4: s0 = q * threadid + tt; e0 = s0 + q; V = s0 * STEP + N1; if (s0 >= e0) goto L2; else goto L0; L0: e = e0 * STEP + N1; L1: BODY; V += STEP; if (V cond e) goto L1; L2: */ static void expand_omp_for_static_nochunk (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree n, q, s0, e0, e, t, tt, nthreads, threadid; tree type, itype, vmain, vback; basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb; basic_block body_bb, cont_bb, collapse_bb = NULL; basic_block fin_bb; gimple_stmt_iterator gsi; edge ep; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); fin_bb = BRANCH_EDGE (entry_bb)->dest; gcc_assert (broken_loop || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest)); seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. */ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } ep = split_block (entry_bb, cond_stmt); ep->flags = EDGE_TRUE_VALUE; entry_bb = ep->dest; ep->probability = profile_probability::very_likely (); ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), ep, UNKNOWN_LOCATION); 
} } gsi = gsi_last_bb (entry_bb); } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); q = create_tmp_reg (itype, "q"); t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT); tt = create_tmp_reg (itype, "tt"); t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT); gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT); t = build2 (LT_EXPR, boolean_type_node, threadid, tt); gcond *cond_stmt = gimple_build_cond_empty (t); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); second_bb = split_block (entry_bb, cond_stmt)->dest; gsi = gsi_last_nondebug_bb (second_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)), GSI_SAME_STMT); gassign *assign_stmt = gimple_build_assign (q, PLUS_EXPR, q, build_int_cst (itype, 1)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); third_bb = split_block (second_bb, assign_stmt)->dest; gsi = gsi_last_nondebug_bb (third_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); t = build2 (MULT_EXPR, itype, q, threadid); t = build2 (PLUS_EXPR, itype, t, tt); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = fold_build2 (PLUS_EXPR, itype, s0, q); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build2 (GE_EXPR, boolean_type_node, s0, 
e0); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { int i; for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. */ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. 
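E.g. (invented source): for #pragma omp for linear(l:2), each thread seeds its private l with the incoming value plus itercnt * 2, where itercnt below is simply s0, or is recomputed from the _looptemp_ bounds for combined constructs.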
*/ tree itercnt = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercnt = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercnt = fold_build2 (EXACT_DIV_EXPR, itype, itercnt, step); itercnt = fold_build2 (PLUS_EXPR, itype, itercnt, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); dest = unshare_expr (t); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), t, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. */ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect all the blocks. 
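For reference, the shape wired up below: ENTRY_BB branches to SECOND_BB (threadid < tt) or THIRD_BB, THIRD_BB tests s0 >= e0 and branches to FIN_BB or SEQ_START_BB, and CONT_BB either loops back towards BODY_BB (through COLLAPSE_BB for collapsed loops) or exits to FIN_BB.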
*/ ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE); ep->probability = profile_probability::guessed_always ().apply_scale (3, 4); ep = find_edge (entry_bb, second_bb); ep->flags = EDGE_TRUE_VALUE; ep->probability = profile_probability::guessed_always ().apply_scale (1, 4); find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE; find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE; if (!broken_loop) { ep = find_edge (cont_bb, body_bb); if (ep == NULL) { ep = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ep->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (ep); ep = NULL; } else if (fd->collapse > 1) { remove_edge (ep); ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else ep->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, fin_bb)->flags = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; } set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); struct loop *loop = body_bb->loop_father; if (loop != entry_bb->loop_father) { gcc_assert (broken_loop || loop->header == body_bb); gcc_assert (broken_loop || loop->latch == region->cont || single_pred (loop->latch) == region->cont); return; } if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt)) { loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, body_bb->loop_father); } } /* A subroutine of expand_omp_for. Generate code for a parallel loop with static schedule and a specified chunk size.
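Illustration with invented numbers: n = 10, nthreads = 2, CHUNK = 2 give thread 0 the slices [0,2), [4,6) and [8,10) on trips 0, 1 and 2, and thread 1 the slices [2,4) and [6,8); chunks are dealt out round-robin by the trip counter.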
Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2; if (cond is <) adj = STEP - 1; else adj = STEP + 1; if ((__typeof (V)) -1 > 0 && cond is >) n = -(adj + N2 - N1) / -STEP; else n = (adj + N2 - N1) / STEP; trip = 0; V = threadid * CHUNK * STEP + N1; -- this extra definition of V is here so that V is defined if the loop is not entered L0: s0 = (trip * nthreads + threadid) * CHUNK; e0 = min (s0 + CHUNK, n); if (s0 < n) goto L1; else goto L4; L1: V = s0 * STEP + N1; e = e0 * STEP + N1; L2: BODY; V += STEP; if (V cond e) goto L2; else goto L3; L3: trip += 1; goto L0; L4: */ static void expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree n, s0, e0, e, t; tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid; tree type, itype, vmain, vback, vextra; basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb; basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb; gimple_stmt_iterator gsi; edge se; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); entry_bb = region->entry; se = split_block (entry_bb, last_stmt (entry_bb)); entry_bb = se->src; iter_part_bb = se->dest; cont_bb = region->cont; gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2); fin_bb = BRANCH_EDGE (iter_part_bb)->dest; gcc_assert (broken_loop || fin_bb == FALLTHRU_EDGE (cont_bb)->dest); seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb)); body_bb = single_succ (seq_start_bb); if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb || single_succ (BRANCH_EDGE (cont_bb)->dest) == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb)); } exit_bb = region->exit; /* Trip and adjustment setup goes in ENTRY_BB. 
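As a worked (hypothetical) instance of the trip-count computation above: for N1 = 0, N2 = 10, STEP = 3 and cond '<', adj = STEP - 1 = 2 and n = (2 + 10 - 0) / 3 = 4, i.e. the four iterations i = 0, 3, 6, 9, which are then dealt out CHUNK at a time across the threads.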
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else if (gimple_omp_for_combined_into_p (fd->for_stmt)) t = integer_one_node; else t = fold_binary (fd->loop.cond_code, boolean_type_node, fold_convert (type, fd->loop.n1), fold_convert (type, fd->loop.n2)); if (fd->collapse == 1 && TYPE_UNSIGNED (type) && (t == NULL_TREE || !integer_onep (t))) { n1 = fold_convert (type, unshare_expr (fd->loop.n1)); n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE, true, GSI_SAME_STMT); n2 = fold_convert (type, unshare_expr (fd->loop.n2)); n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE, true, GSI_SAME_STMT); gcond *cond_stmt = gimple_build_cond (fd->loop.cond_code, n1, n2, NULL_TREE, NULL_TREE); gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } se = split_block (entry_bb, cond_stmt); se->flags = EDGE_TRUE_VALUE; entry_bb = se->dest; se->probability = profile_probability::very_likely (); se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE); se->probability = profile_probability::very_unlikely (); if (gimple_in_ssa_p (cfun)) { int dest_idx = find_edge (iter_part_bb, fin_bb)->dest_idx; for (gphi_iterator gpi = gsi_start_phis (fin_bb); !gsi_end_p (gpi); gsi_next (&gpi)) { gphi *phi = gpi.phi (); add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx), se, UNKNOWN_LOCATION); } } gsi = gsi_last_bb (entry_bb); } switch (gimple_omp_for_kind (fd->for_stmt)) { case GF_OMP_FOR_KIND_FOR: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM); break; case GF_OMP_FOR_KIND_DISTRIBUTE: nthreads = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_TEAMS); threadid = builtin_decl_explicit (BUILT_IN_OMP_GET_TEAM_NUM); break; default: gcc_unreachable (); } nthreads = build_call_expr (nthreads, 0); nthreads = fold_convert (itype, nthreads); nthreads = force_gimple_operand_gsi (&gsi, nthreads, true, NULL_TREE, true, GSI_SAME_STMT); threadid = build_call_expr (threadid, 0); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); n1 = fd->loop.n1; n2 = fd->loop.n2; step = fd->loop.step; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree chunk_size = fold_convert (itype, fd->chunk_size); chunk_size = omp_adjust_chunk_size (chunk_size, fd->simd_schedule); chunk_size = force_gimple_operand_gsi (&gsi, chunk_size, true, 
NULL_TREE, true, GSI_SAME_STMT); t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1)); t = fold_build2 (PLUS_EXPR, itype, step, t); t = fold_build2 (PLUS_EXPR, itype, t, n2); t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1)); if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR) t = fold_build2 (TRUNC_DIV_EXPR, itype, fold_build1 (NEGATE_EXPR, itype, t), fold_build1 (NEGATE_EXPR, itype, step)); else t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step); t = fold_convert (itype, t); n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); trip_var = create_tmp_reg (itype, ".trip"); if (gimple_in_ssa_p (cfun)) { trip_init = make_ssa_name (trip_var); trip_main = make_ssa_name (trip_var); trip_back = make_ssa_name (trip_var); } else { trip_init = trip_var; trip_main = trip_var; trip_back = trip_var; } gassign *assign_stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0)); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = fold_build2 (MULT_EXPR, itype, threadid, chunk_size); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); vextra = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); gimple_stmt_iterator gsif = gsi; /* Iteration space partitioning goes in ITER_PART_BB. */ gsi = gsi_last_bb (iter_part_bb); t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads); t = fold_build2 (PLUS_EXPR, itype, t, threadid); t = fold_build2 (MULT_EXPR, itype, t, chunk_size); s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = fold_build2 (PLUS_EXPR, itype, s0, chunk_size); t = fold_build2 (MIN_EXPR, itype, t, n); e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (LT_EXPR, boolean_type_node, s0, n); gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING); /* Setup code for sequential iteration goes in SEQ_START_BB. */ gsi = gsi_start_bb (seq_start_bb); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL ? gimple_omp_parallel_clauses (inner_stmt) : gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST && gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE) { int i; for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (distribute parallel for with lastprivate), propagate down the total number of iterations. 
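A (hypothetical) source construct that needs this is

     #pragma omp distribute parallel for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

where the inner construct must see the total iteration count to know which iteration performs the final write to x.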
*/ tree t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (innerc)), fd->loop.n2); t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } } t = fold_convert (itype, s0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (itype, e0); t = fold_build2 (MULT_EXPR, itype, t, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (TREE_TYPE (startvar), t); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Handle linear clause adjustments. */ tree itercnt = NULL_TREE, itercntbias = NULL_TREE; if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR) for (tree c = gimple_omp_for_clauses (fd->for_stmt); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && !OMP_CLAUSE_LINEAR_NO_COPYIN (c)) { tree d = OMP_CLAUSE_DECL (c); bool is_ref = omp_is_reference (d); tree t = d, a, dest; if (is_ref) t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t); tree type = TREE_TYPE (t); if (POINTER_TYPE_P (type)) type = sizetype; dest = unshare_expr (t); tree v = create_tmp_var (TREE_TYPE (t), NULL); expand_omp_build_assign (&gsif, v, t); if (itercnt == NULL_TREE) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { itercntbias = fold_build2 (MINUS_EXPR, itype, fold_convert (itype, n1), fold_convert (itype, fd->loop.n1)); itercntbias = fold_build2 (EXACT_DIV_EXPR, itype, itercntbias, step); itercntbias = force_gimple_operand_gsi (&gsif, itercntbias, true, NULL_TREE, true, GSI_SAME_STMT); itercnt = fold_build2 (PLUS_EXPR, itype, itercntbias, s0); itercnt = force_gimple_operand_gsi (&gsi, itercnt, true, NULL_TREE, false, GSI_CONTINUE_LINKING); } else itercnt = s0; } a = fold_build2 (MULT_EXPR, type, fold_convert (type, itercnt), fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c))); t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (dest, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop goes in CONT_BB, replacing the GIMPLE_OMP_CONTINUE. 
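This emits the 'V += STEP; if (V cond e) goto L2; else goto L3;' step from the pseudocode in the function header comment.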
*/ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); if (DECL_P (vback) && TREE_ADDRESSABLE (vback)) t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); if (tree_int_cst_equal (fd->chunk_size, integer_one_node)) t = build2 (EQ_EXPR, boolean_type_node, build_int_cst (itype, 0), build_int_cst (itype, 1)); else t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); /* Trip update code goes into TRIP_UPDATE_BB. */ gsi = gsi_start_bb (trip_update_bb); t = build_int_cst (itype, 1); t = build2 (PLUS_EXPR, itype, trip_main, t); assign_stmt = gimple_build_assign (trip_back, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */ gsi = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (gsi))) { t = gimple_omp_return_lhs (gsi_stmt (gsi)); gsi_insert_after (&gsi, omp_build_barrier (t), GSI_SAME_STMT); } gsi_remove (&gsi, true); /* Connect the new blocks. */ find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE; find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE; if (!broken_loop) { se = find_edge (cont_bb, body_bb); if (se == NULL) { se = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (se->dest) == body_bb); } if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (se); se = NULL; } else if (fd->collapse > 1) { remove_edge (se); se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else se->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, trip_update_bb)->flags = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb); } if (gimple_in_ssa_p (cfun)) { gphi_iterator psi; gphi *phi; edge re, ene; edge_var_map *vm; size_t i; gcc_assert (fd->collapse == 1 && !broken_loop); /* When we redirect the edge from trip_update_bb to iter_part_bb, we remove arguments of the phi nodes in fin_bb. We need to create appropriate phi nodes in iter_part_bb instead. */ se = find_edge (iter_part_bb, fin_bb); re = single_succ_edge (trip_update_bb); vec<edge_var_map> *head = redirect_edge_var_map_vector (re); ene = single_succ_edge (entry_bb); psi = gsi_start_phis (fin_bb); for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm); gsi_next (&psi), ++i) { gphi *nphi; source_location locus; phi = psi.phi (); if (operand_equal_p (gimple_phi_arg_def (phi, 0), redirect_edge_var_map_def (vm), 0)) continue; t = gimple_phi_result (phi); gcc_assert (t == redirect_edge_var_map_result (vm)); if (!single_pred_p (fin_bb)) t = copy_ssa_name (t, phi); nphi = create_phi_node (t, iter_part_bb); t = PHI_ARG_DEF_FROM_EDGE (phi, se); locus = gimple_phi_arg_location_from_edge (phi, se); /* A special case -- fd->loop.v is not yet computed in iter_part_bb, we need to use vextra instead. 
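(vextra was computed in the entry block as 'threadid * CHUNK * STEP + N1', the extra definition of V from the pseudocode above that keeps V defined even when the loop body is never entered.)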
*/ if (t == fd->loop.v) t = vextra; add_phi_arg (nphi, t, ene, locus); locus = redirect_edge_var_map_location (vm); tree back_arg = redirect_edge_var_map_def (vm); add_phi_arg (nphi, back_arg, re, locus); edge ce = find_edge (cont_bb, body_bb); if (ce == NULL) { ce = BRANCH_EDGE (cont_bb); gcc_assert (single_succ (ce->dest) == body_bb); ce = single_succ_edge (ce->dest); } gphi *inner_loop_phi = find_phi_with_arg_on_edge (back_arg, ce); gcc_assert (inner_loop_phi != NULL); add_phi_arg (inner_loop_phi, gimple_phi_result (nphi), find_edge (seq_start_bb, body_bb), locus); if (!single_pred_p (fin_bb)) add_phi_arg (phi, gimple_phi_result (nphi), se, locus); } gcc_assert (gsi_end_p (psi) && (head == NULL || i == head->length ())); redirect_edge_var_map_clear (re); if (single_pred_p (fin_bb)) while (1) { psi = gsi_start_phis (fin_bb); if (gsi_end_p (psi)) break; remove_phi_node (&psi, false); } /* Make phi node for trip. */ phi = create_phi_node (trip_main, iter_part_bb); add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb), UNKNOWN_LOCATION); add_phi_arg (phi, trip_init, single_succ_edge (entry_bb), UNKNOWN_LOCATION); } if (!broken_loop) set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, iter_part_bb, recompute_dominator (CDI_DOMINATORS, iter_part_bb)); set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, recompute_dominator (CDI_DOMINATORS, seq_start_bb)); set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); if (!broken_loop) { struct loop *loop = body_bb->loop_father; struct loop *trip_loop = alloc_loop (); trip_loop->header = iter_part_bb; trip_loop->latch = trip_update_bb; add_loop (trip_loop, iter_part_bb->loop_father); if (loop != entry_bb->loop_father) { gcc_assert (loop->header == body_bb); gcc_assert (loop->latch == region->cont || single_pred (loop->latch) == region->cont); trip_loop->inner = loop; return; } if (!gimple_omp_for_combined_p (fd->for_stmt)) { loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, trip_loop); } } } /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing loop. Given parameters: for (V = N1; V cond N2; V += STEP) BODY; where COND is "<" or ">", we generate pseudocode V = N1; goto L1; L0: BODY; V += STEP; L1: if (V cond N2) goto L0; else goto L2; L2: For collapsed loops, given parameters: collapse(3) for (V1 = N11; V1 cond1 N12; V1 += STEP1) for (V2 = N21; V2 cond2 N22; V2 += STEP2) for (V3 = N31; V3 cond3 N32; V3 += STEP3) BODY; we generate pseudocode if (cond3 is <) adj = STEP3 - 1; else adj = STEP3 + 1; count3 = (adj + N32 - N31) / STEP3; if (cond2 is <) adj = STEP2 - 1; else adj = STEP2 + 1; count2 = (adj + N22 - N21) / STEP2; if (cond1 is <) adj = STEP1 - 1; else adj = STEP1 + 1; count1 = (adj + N12 - N11) / STEP1; count = count1 * count2 * count3; V = 0; V1 = N11; V2 = N21; V3 = N31; goto L1; L0: BODY; V += 1; V3 += STEP3; V2 += (V3 cond3 N32) ? 0 : STEP2; V3 = (V3 cond3 N32) ? V3 : N31; V1 += (V2 cond2 N22) ? 0 : STEP1; V2 = (V2 cond2 N22) ? 
V2 : N21; L1: if (V < count) goto L0; else goto L2; L2: */ static void expand_omp_simd (struct omp_region *region, struct omp_for_data *fd) { tree type, t; basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb; gimple_stmt_iterator gsi; gimple *stmt; gcond *cond_stmt; bool broken_loop = region->cont == NULL; edge e, ne; tree *counts = NULL; int i; int safelen_int = INT_MAX; tree safelen = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE_SAFELEN); tree simduid = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__SIMDUID_); tree n1, n2; if (safelen) { poly_uint64 val; safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen); if (!poly_int_tree_p (safelen, &val)) safelen_int = 0; else safelen_int = MIN (constant_lower_bound (val), INT_MAX); if (safelen_int == 1) safelen_int = 0; } type = TREE_TYPE (fd->loop.v); entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); gcc_assert (broken_loop || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); l0_bb = FALLTHRU_EDGE (entry_bb)->dest; if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest; l2_bb = BRANCH_EDGE (entry_bb)->dest; } else { BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL; l1_bb = split_edge (BRANCH_EDGE (entry_bb)); l2_bb = single_succ (l1_bb); } exit_bb = region->exit; l2_dom_bb = NULL; gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); /* Not needed in SSA form right now. */ gcc_assert (!gimple_in_ssa_p (cfun)); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block zero_iter_bb = l2_bb, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); } if (l2_dom_bb == NULL) l2_dom_bb = l1_bb; n1 = fd->loop.n1; n2 = fd->loop.n2; if (gimple_omp_for_combined_into_p (fd->for_stmt)) { tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); } tree step = fd->loop.step; bool is_simt = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__SIMT_); if (is_simt) { cfun->curr_properties &= ~PROP_gimple_lomp_dev; is_simt = safelen_int > 1; } tree simt_lane = NULL_TREE, simt_maxlane = NULL_TREE; if (is_simt) { simt_lane = create_tmp_var (unsigned_type_node); gimple *g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0); gimple_call_set_lhs (g, simt_lane); gsi_insert_before (&gsi, g, GSI_SAME_STMT); tree offset = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, fold_convert (TREE_TYPE (step), simt_lane)); n1 = fold_convert (type, n1); if (POINTER_TYPE_P (type)) n1 = fold_build_pointer_plus (n1, offset); else n1 = fold_build2 (PLUS_EXPR, type, n1, fold_convert (type, offset)); /* Collapsed loops not handled for SIMT yet: limit to one lane only. 
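For the non-collapsed branch just below, a (hypothetical) numeric example: safelen(4) with omp_max_simt_vf () returning 32 gives simt_maxlane == 4, so only lanes 0..3 execute distinct iterations.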
*/ if (fd->collapse > 1) simt_maxlane = build_one_cst (unsigned_type_node); else if (safelen_int < omp_max_simt_vf ()) simt_maxlane = build_int_cst (unsigned_type_node, safelen_int); tree vf = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_GOMP_SIMT_VF, unsigned_type_node, 0); if (simt_maxlane) vf = fold_build2 (MIN_EXPR, unsigned_type_node, vf, simt_maxlane); vf = fold_convert (TREE_TYPE (step), vf); step = fold_build2 (MULT_EXPR, TREE_TYPE (step), step, vf); } expand_omp_build_assign (&gsi, fd->loop.v, fold_convert (type, n1)); if (fd->collapse > 1) { if (gimple_omp_for_combined_into_p (fd->for_stmt)) { gsi_prev (&gsi); expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1); gsi_next (&gsi); } else for (i = 0; i < fd->collapse; i++) { tree itype = TREE_TYPE (fd->loops[i].v); if (POINTER_TYPE_P (itype)) itype = signed_type_for (itype); t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1); expand_omp_build_assign (&gsi, fd->loops[i].v, t); } } /* Remove the GIMPLE_OMP_FOR statement. */ gsi_remove (&gsi, true); if (!broken_loop) { /* Code to control the increment goes in the CONT_BB. */ gsi = gsi_last_nondebug_bb (cont_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loop.v, step); else t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step); expand_omp_build_assign (&gsi, fd->loop.v, t); if (fd->collapse > 1) { i = fd->collapse - 1; if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))) { t = fold_convert (sizetype, fd->loops[i].step); t = fold_build_pointer_plus (fd->loops[i].v, t); } else { t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].step); t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v), fd->loops[i].v, t); } expand_omp_build_assign (&gsi, fd->loops[i].v, t); for (i = fd->collapse - 1; i > 0; i--) { tree itype = TREE_TYPE (fd->loops[i].v); tree itype2 = TREE_TYPE (fd->loops[i - 1].v); if (POINTER_TYPE_P (itype2)) itype2 = signed_type_for (itype2); t = fold_convert (itype2, fd->loops[i - 1].step); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build3 (COND_EXPR, itype2, build2 (fd->loops[i].cond_code, boolean_type_node, fd->loops[i].v, fold_convert (itype, fd->loops[i].n2)), build_int_cst (itype2, 0), t); if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v))) t = fold_build_pointer_plus (fd->loops[i - 1].v, t); else t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t); expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t); t = fold_convert (itype, fd->loops[i].n1); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT); t = build3 (COND_EXPR, itype, build2 (fd->loops[i].cond_code, boolean_type_node, fd->loops[i].v, fold_convert (itype, fd->loops[i].n2)), fd->loops[i].v, t); expand_omp_build_assign (&gsi, fd->loops[i].v, t); } } /* Remove GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); } /* Emit the condition in L1_BB. 
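This is the 'L1: if (V cond N2) goto L0; else goto L2;' test from the function header pseudocode (or 'if (V < count)' in the collapsed case).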
*/ gsi = gsi_start_bb (l1_bb); t = fold_convert (type, n2); t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); tree v = fd->loop.v; if (DECL_P (v) && TREE_ADDRESSABLE (v)) v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE, false, GSI_CONTINUE_LINKING); t = build2 (fd->loop.cond_code, boolean_type_node, v, t); cond_stmt = gimple_build_cond_empty (t); gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING); if (walk_tree (gimple_cond_lhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL) || walk_tree (gimple_cond_rhs_ptr (cond_stmt), expand_omp_regimplify_p, NULL, NULL)) { gsi = gsi_for_stmt (cond_stmt); gimple_regimplify_operands (cond_stmt, &gsi); } /* Add 'V -= STEP * (SIMT_VF - 1)' after the loop. */ if (is_simt) { gsi = gsi_start_bb (l2_bb); step = fold_build2 (MINUS_EXPR, TREE_TYPE (step), fd->loop.step, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (fd->loop.v, step); else t = fold_build2 (PLUS_EXPR, type, fd->loop.v, step); expand_omp_build_assign (&gsi, fd->loop.v, t); } /* Remove GIMPLE_OMP_RETURN. */ gsi = gsi_last_nondebug_bb (exit_bb); gsi_remove (&gsi, true); /* Connect the new blocks. */ remove_edge (FALLTHRU_EDGE (entry_bb)); if (!broken_loop) { remove_edge (BRANCH_EDGE (entry_bb)); make_edge (entry_bb, l1_bb, EDGE_FALLTHRU); e = BRANCH_EDGE (l1_bb); ne = FALLTHRU_EDGE (l1_bb); e->flags = EDGE_TRUE_VALUE; } else { single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; ne = single_succ_edge (l1_bb); e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE); } ne->flags = EDGE_FALSE_VALUE; e->probability = profile_probability::guessed_always ().apply_scale (7, 8); ne->probability = e->probability.invert (); set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb); set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb); if (simt_maxlane) { cond_stmt = gimple_build_cond (LT_EXPR, simt_lane, simt_maxlane, NULL_TREE, NULL_TREE); gsi = gsi_last_bb (entry_bb); gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT); make_edge (entry_bb, l2_bb, EDGE_FALSE_VALUE); FALLTHRU_EDGE (entry_bb)->flags = EDGE_TRUE_VALUE; FALLTHRU_EDGE (entry_bb)->probability = profile_probability::guessed_always ().apply_scale (7, 8); BRANCH_EDGE (entry_bb)->probability = FALLTHRU_EDGE (entry_bb)->probability.invert (); l2_dom_bb = entry_bb; } set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb); if (!broken_loop) { struct loop *loop = alloc_loop (); loop->header = l1_bb; loop->latch = cont_bb; add_loop (loop, l1_bb->loop_father); loop->safelen = safelen_int; if (simduid) { loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid); cfun->has_simduid_loops = true; } /* If not -fno-tree-loop-vectorize, hint that we want to vectorize the loop. */ if ((flag_tree_loop_vectorize || !global_options_set.x_flag_tree_loop_vectorize) && flag_tree_loop_optimize && loop->safelen > 1) { loop->force_vectorize = true; cfun->has_force_vectorize_loops = true; } } else if (simduid) cfun->has_simduid_loops = true; } /* Taskloop construct is represented after gimplification with two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched in between them. This routine expands the outer GIMPLE_OMP_FOR, which should just compute all the needed loop temporaries for GIMPLE_OMP_TASK. 
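A (hypothetical) source form reaching this expander:

     #pragma omp taskloop
     for (i = 0; i < n; i++)
       body (i);

Only the start/end loop temporaries are produced here; the actual slicing into per-task iteration ranges is done by GOMP_taskloop{,_ull} at run time.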
*/ static void expand_omp_taskloop_for_outer (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree type, bias = NULL_TREE; basic_block entry_bb, cont_bb, exit_bb; gimple_stmt_iterator gsi; gassign *assign_stmt; tree *counts = NULL; int i; gcc_assert (inner_stmt); gcc_assert (region->cont); gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_TASK && gimple_omp_task_taskloop_p (inner_stmt)); type = TREE_TYPE (fd->loop.v); /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type)) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest); exit_bb = region->exit; gsi = gsi_last_nondebug_bb (entry_bb); gimple *for_stmt = gsi_stmt (gsi); gcc_assert (gimple_code (for_stmt) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block zero_iter_bb = NULL, dummy_bb = NULL, l2_dom_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, zero_iter_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); if (zero_iter_bb) { /* Some counts[i] vars might be uninitialized if some loop has zero iterations. But the body shouldn't be executed in that case, so just avoid uninit warnings. */ for (i = first_zero_iter; i < fd->collapse; i++) if (SSA_VAR_P (counts[i])) TREE_NO_WARNING (counts[i]) = 1; gsi_prev (&gsi); edge e = split_block (entry_bb, gsi_stmt (gsi)); entry_bb = e->dest; make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU); gsi = gsi_last_bb (entry_bb); set_immediate_dominator (CDI_DOMINATORS, entry_bb, get_immediate_dominator (CDI_DOMINATORS, zero_iter_bb)); } } tree t0, t1; t1 = fd->loop.n2; t0 = fd->loop.n1; if (POINTER_TYPE_P (TREE_TYPE (t0)) && TYPE_PRECISION (TREE_TYPE (t0)) != TYPE_PRECISION (fd->iter_type)) { /* Avoid casting pointers to integer of a different size. */ tree itype = signed_type_for (type); t1 = fold_convert (fd->iter_type, fold_convert (itype, t1)); t0 = fold_convert (fd->iter_type, fold_convert (itype, t0)); } else { t1 = fold_convert (fd->iter_type, t1); t0 = fold_convert (fd->iter_type, t0); } if (bias) { t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias); t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias); } tree innerc = omp_find_clause (gimple_omp_task_clauses (inner_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); tree startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); tree endvar = OMP_CLAUSE_DECL (innerc); if (fd->collapse > 1 && TREE_CODE (fd->loop.n2) != INTEGER_CST) { gcc_assert (innerc); for (i = 1; i < fd->collapse; i++) { innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); } innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); if (innerc) { /* If needed (inner taskloop has lastprivate clause), propagate down the total number of iterations. 
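(E.g. a hypothetical '#pragma omp taskloop lastprivate (x)': the innermost body needs fd->loop.n2 to identify the final iteration.)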
*/ tree t = force_gimple_operand_gsi (&gsi, fd->loop.n2, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (OMP_CLAUSE_DECL (innerc), t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } } t0 = force_gimple_operand_gsi (&gsi, t0, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (startvar, t0); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t1 = force_gimple_operand_gsi (&gsi, t1, false, NULL_TREE, false, GSI_CONTINUE_LINKING); assign_stmt = gimple_build_assign (endvar, t1); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); /* Remove the GIMPLE_OMP_FOR statement. */ gsi = gsi_for_stmt (for_stmt); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (cont_bb); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (exit_bb); gsi_remove (&gsi, true); FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always (); remove_edge (BRANCH_EDGE (entry_bb)); FALLTHRU_EDGE (cont_bb)->probability = profile_probability::always (); remove_edge (BRANCH_EDGE (cont_bb)); set_immediate_dominator (CDI_DOMINATORS, exit_bb, cont_bb); set_immediate_dominator (CDI_DOMINATORS, region->entry, recompute_dominator (CDI_DOMINATORS, region->entry)); } /* Taskloop construct is represented after gimplification with two GIMPLE_OMP_FOR constructs with GIMPLE_OMP_TASK sandwiched in between them. This routine expands the inner GIMPLE_OMP_FOR. GOMP_taskloop{,_ull} function arranges for each task to be given just a single range of iterations. */ static void expand_omp_taskloop_for_inner (struct omp_region *region, struct omp_for_data *fd, gimple *inner_stmt) { tree e, t, type, itype, vmain, vback, bias = NULL_TREE; basic_block entry_bb, exit_bb, body_bb, cont_bb, collapse_bb = NULL; basic_block fin_bb; gimple_stmt_iterator gsi; edge ep; bool broken_loop = region->cont == NULL; tree *counts = NULL; tree n1, n2, step; itype = type = TREE_TYPE (fd->loop.v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); /* See if we need to bias by LLONG_MIN. */ if (fd->iter_type == long_long_unsigned_type_node && TREE_CODE (type) == INTEGER_TYPE && !TYPE_UNSIGNED (type)) { tree n1, n2; if (fd->loop.cond_code == LT_EXPR) { n1 = fd->loop.n1; n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step); } else { n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step); n2 = fd->loop.n1; } if (TREE_CODE (n1) != INTEGER_CST || TREE_CODE (n2) != INTEGER_CST || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0))) bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type)); } entry_bb = region->entry; cont_bb = region->cont; gcc_assert (EDGE_COUNT (entry_bb->succs) == 2); fin_bb = BRANCH_EDGE (entry_bb)->dest; gcc_assert (broken_loop || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest)); body_bb = FALLTHRU_EDGE (entry_bb)->dest; if (!broken_loop) { gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb); gcc_assert (EDGE_COUNT (cont_bb->succs) == 2); } exit_bb = region->exit; /* Iteration space partitioning goes in ENTRY_BB. 
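Here this amounts to reading back the single [n1, n2) range that GOMP_taskloop{,_ull} assigned to this task through the _looptemp_ clauses below; no nthreads/threadid computation is needed.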
*/ gsi = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR); if (fd->collapse > 1) { int first_zero_iter = -1, dummy = -1; basic_block l2_dom_bb = NULL, dummy_bb = NULL; counts = XALLOCAVEC (tree, fd->collapse); expand_omp_for_init_counts (fd, &gsi, entry_bb, counts, fin_bb, first_zero_iter, dummy_bb, dummy, l2_dom_bb); t = NULL_TREE; } else t = integer_one_node; step = fd->loop.step; tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n1 = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); n2 = OMP_CLAUSE_DECL (innerc); if (bias) { n1 = fold_build2 (PLUS_EXPR, fd->iter_type, n1, bias); n2 = fold_build2 (PLUS_EXPR, fd->iter_type, n2, bias); } n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree startvar = fd->loop.v; tree endvar = NULL_TREE; if (gimple_omp_for_combined_p (fd->for_stmt)) { tree clauses = gimple_omp_for_clauses (inner_stmt); tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); startvar = OMP_CLAUSE_DECL (innerc); innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc), OMP_CLAUSE__LOOPTEMP_); gcc_assert (innerc); endvar = OMP_CLAUSE_DECL (innerc); } t = fold_convert (TREE_TYPE (startvar), n1); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, false, GSI_CONTINUE_LINKING); gimple *assign_stmt = gimple_build_assign (startvar, t); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); t = fold_convert (TREE_TYPE (startvar), n2); e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, false, GSI_CONTINUE_LINKING); if (endvar) { assign_stmt = gimple_build_assign (endvar, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (e))) assign_stmt = gimple_build_assign (fd->loop.v, e); else assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, e); gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING); } if (fd->collapse > 1) expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar); if (!broken_loop) { /* The code controlling the sequential loop replaces the GIMPLE_OMP_CONTINUE. */ gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont_stmt); vback = gimple_omp_continue_control_def (cont_stmt); if (!gimple_omp_for_combined_p (fd->for_stmt)) { if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (vmain, step); else t = fold_build2 (PLUS_EXPR, type, vmain, step); t = force_gimple_operand_gsi (&gsi, t, DECL_P (vback) && TREE_ADDRESSABLE (vback), NULL_TREE, true, GSI_SAME_STMT); assign_stmt = gimple_build_assign (vback, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); t = build2 (fd->loop.cond_code, boolean_type_node, DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e); gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT); } /* Remove the GIMPLE_OMP_CONTINUE statement. 
*/ gsi_remove (&gsi, true); if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt)) collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb); } /* Remove the GIMPLE_OMP_FOR statement. */ gsi = gsi_for_stmt (fd->for_stmt); gsi_remove (&gsi, true); /* Remove the GIMPLE_OMP_RETURN statement. */ gsi = gsi_last_nondebug_bb (exit_bb); gsi_remove (&gsi, true); FALLTHRU_EDGE (entry_bb)->probability = profile_probability::always (); if (!broken_loop) remove_edge (BRANCH_EDGE (entry_bb)); else { remove_edge_and_dominated_blocks (BRANCH_EDGE (entry_bb)); region->outer->cont = NULL; } /* Connect all the blocks. */ if (!broken_loop) { ep = find_edge (cont_bb, body_bb); if (gimple_omp_for_combined_p (fd->for_stmt)) { remove_edge (ep); ep = NULL; } else if (fd->collapse > 1) { remove_edge (ep); ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE); } else ep->flags = EDGE_TRUE_VALUE; find_edge (cont_bb, fin_bb)->flags = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU; } set_immediate_dominator (CDI_DOMINATORS, body_bb, recompute_dominator (CDI_DOMINATORS, body_bb)); if (!broken_loop) set_immediate_dominator (CDI_DOMINATORS, fin_bb, recompute_dominator (CDI_DOMINATORS, fin_bb)); if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt)) { struct loop *loop = alloc_loop (); loop->header = body_bb; if (collapse_bb == NULL) loop->latch = cont_bb; add_loop (loop, body_bb->loop_father); } } /* A subroutine of expand_omp_for. Generate code for an OpenACC partitioned loop. The lowering here is abstracted, in that the loop parameters are passed through internal functions, which are further lowered by oacc_device_lower, once we get to the target compiler. The loop is of the form: for (V = B; V LTGT E; V += S) {BODY} where LTGT is < or >. We may have a specified chunking size, CHUNKING (constant 0 for no chunking), and we will have a GWV partitioning mask, specifying dimensions over which the loop is to be partitioned (see note below). We generate code that looks like (this ignores tiling): <entry_bb> [incoming FALL->body, BRANCH->exit] typedef signedintify (typeof (V)) T; // underlying signed integral type T range = E - B; T chunk_no = 0; T dir = LTGT == '<' ? +1 : -1; T chunk_max = GOACC_LOOP_CHUNK (dir, range, S, CHUNK_SIZE, GWV); T step = GOACC_LOOP_STEP (dir, range, S, CHUNK_SIZE, GWV); <head_bb> [created by splitting end of entry_bb] T offset = GOACC_LOOP_OFFSET (dir, range, S, CHUNK_SIZE, GWV, chunk_no); T bound = GOACC_LOOP_BOUND (dir, range, S, CHUNK_SIZE, GWV, offset); if (!(offset LTGT bound)) goto bottom_bb; <body_bb> [incoming] V = B + offset; {BODY} <cont_bb> [incoming, may == body_bb FALL->exit_bb, BRANCH->body_bb] offset += step; if (offset LTGT bound) goto body_bb; [*] <bottom_bb> [created by splitting start of exit_bb] insert BRANCH->head_bb chunk_no++; if (chunk_no < chunk_max) goto head_bb; <exit_bb> [incoming] V = B + ((range -/+ 1) / S +/- 1) * S [*] [*] Needed if V live at end of loop.
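A (hypothetical) OpenACC source loop reaching this expander:

     #pragma acc loop gang vector
     for (i = 0; i < n; i++)
       body (i);

with GWV then carrying the gang/vector partitioning mask and CHUNKING as described above.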
*/ static void expand_oacc_for (struct omp_region *region, struct omp_for_data *fd) { tree v = fd->loop.v; enum tree_code cond_code = fd->loop.cond_code; enum tree_code plus_code = PLUS_EXPR; tree chunk_size = integer_minus_one_node; tree gwv = integer_zero_node; tree iter_type = TREE_TYPE (v); tree diff_type = iter_type; tree plus_type = iter_type; struct oacc_collapse *counts = NULL; gcc_checking_assert (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP); gcc_assert (!gimple_omp_for_combined_into_p (fd->for_stmt)); gcc_assert (cond_code == LT_EXPR || cond_code == GT_EXPR); if (POINTER_TYPE_P (iter_type)) { plus_code = POINTER_PLUS_EXPR; plus_type = sizetype; } if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type)) diff_type = signed_type_for (diff_type); if (TYPE_PRECISION (diff_type) < TYPE_PRECISION (integer_type_node)) diff_type = integer_type_node; basic_block entry_bb = region->entry; /* BB ending in OMP_FOR */ basic_block exit_bb = region->exit; /* BB ending in OMP_RETURN */ basic_block cont_bb = region->cont; /* BB ending in OMP_CONTINUE */ basic_block bottom_bb = NULL; /* entry_bb has two successors; the branch edge goes to the exit block, the fallthrough edge to the body. */ gcc_assert (EDGE_COUNT (entry_bb->succs) == 2 && BRANCH_EDGE (entry_bb)->dest == exit_bb); /* If cont_bb is non-NULL, it has 2 successors. The branch successor is body_bb, or a block whose only successor is body_bb. Its fallthrough successor is the final block (same as the branch successor of the entry_bb). */ if (cont_bb) { basic_block body_bb = FALLTHRU_EDGE (entry_bb)->dest; basic_block bed = BRANCH_EDGE (cont_bb)->dest; gcc_assert (FALLTHRU_EDGE (cont_bb)->dest == exit_bb); gcc_assert (bed == body_bb || single_succ_edge (bed)->dest == body_bb); } else gcc_assert (!gimple_in_ssa_p (cfun)); /* The exit block has only entry_bb and cont_bb as predecessors. */ gcc_assert (EDGE_COUNT (exit_bb->preds) == 1 + (cont_bb != NULL)); tree chunk_no; tree chunk_max = NULL_TREE; tree bound, offset; tree step = create_tmp_var (diff_type, ".step"); bool up = cond_code == LT_EXPR; tree dir = build_int_cst (diff_type, up ? +1 : -1); bool chunking = !gimple_in_ssa_p (cfun); bool negating; /* Tiling vars. */ tree tile_size = NULL_TREE; tree element_s = NULL_TREE; tree e_bound = NULL_TREE, e_offset = NULL_TREE, e_step = NULL_TREE; basic_block elem_body_bb = NULL; basic_block elem_cont_bb = NULL; /* SSA instances. */ tree offset_incr = NULL_TREE; tree offset_init = NULL_TREE; gimple_stmt_iterator gsi; gassign *ass; gcall *call; gimple *stmt; tree expr; location_t loc; edge split, be, fte; /* Split the end of entry_bb to create head_bb. */ split = split_block (entry_bb, last_stmt (entry_bb)); basic_block head_bb = split->dest; entry_bb = split->src; /* Chunk setup goes at the end of entry_bb, replacing the omp_for. */ gsi = gsi_last_nondebug_bb (entry_bb); gomp_for *for_stmt = as_a <gomp_for *> (gsi_stmt (gsi)); loc = gimple_location (for_stmt); if (gimple_in_ssa_p (cfun)) { offset_init = gimple_omp_for_index (for_stmt, 0); gcc_assert (integer_zerop (fd->loop.n1)); /* The SSA parallelizer does gang parallelism.
*/ gwv = build_int_cst (integer_type_node, GOMP_DIM_MASK (GOMP_DIM_GANG)); } if (fd->collapse > 1 || fd->tiling) { gcc_assert (!gimple_in_ssa_p (cfun) && up); counts = XALLOCAVEC (struct oacc_collapse, fd->collapse); tree total = expand_oacc_collapse_init (fd, &gsi, counts, TREE_TYPE (fd->loop.n2), loc); if (SSA_VAR_P (fd->loop.n2)) { total = force_gimple_operand_gsi (&gsi, total, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (fd->loop.n2, total); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); } } tree b = fd->loop.n1; tree e = fd->loop.n2; tree s = fd->loop.step; b = force_gimple_operand_gsi (&gsi, b, true, NULL_TREE, true, GSI_SAME_STMT); e = force_gimple_operand_gsi (&gsi, e, true, NULL_TREE, true, GSI_SAME_STMT); /* Convert the step, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (TREE_TYPE (s)); if (negating) s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s); s = fold_convert (diff_type, s); if (negating) s = fold_build1 (NEGATE_EXPR, diff_type, s); s = force_gimple_operand_gsi (&gsi, s, true, NULL_TREE, true, GSI_SAME_STMT); if (!chunking) chunk_size = integer_zero_node; expr = fold_convert (diff_type, chunk_size); chunk_size = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); if (fd->tiling) { /* Determine the tile size and element step, modify the outer loop step size. */ tile_size = create_tmp_var (diff_type, ".tile_size"); expr = build_int_cst (diff_type, 1); for (int ix = 0; ix < fd->collapse; ix++) expr = fold_build2 (MULT_EXPR, diff_type, counts[ix].tile, expr); expr = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (tile_size, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); element_s = create_tmp_var (diff_type, ".element_s"); ass = gimple_build_assign (element_s, s); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = fold_build2 (MULT_EXPR, diff_type, s, tile_size); s = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); } /* Determine the range, avoiding possible unsigned->signed overflow. */ negating = !up && TYPE_UNSIGNED (iter_type); expr = fold_build2 (MINUS_EXPR, plus_type, fold_convert (plus_type, negating ? b : e), fold_convert (plus_type, negating ? e : b)); expr = fold_convert (diff_type, expr); if (negating) expr = fold_build1 (NEGATE_EXPR, diff_type, expr); tree range = force_gimple_operand_gsi (&gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT); chunk_no = build_int_cst (diff_type, 0); if (chunking) { gcc_assert (!gimple_in_ssa_p (cfun)); expr = chunk_no; chunk_max = create_tmp_var (diff_type, ".chunk_max"); chunk_no = create_tmp_var (diff_type, ".chunk_no"); ass = gimple_build_assign (chunk_no, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, build_int_cst (integer_type_node, IFN_GOACC_LOOP_CHUNKS), dir, range, s, chunk_size, gwv); gimple_call_set_lhs (call, chunk_max); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); } else chunk_size = chunk_no; call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP), dir, range, s, chunk_size, gwv); gimple_call_set_lhs (call, step); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); /* Remove the GIMPLE_OMP_FOR. */ gsi_remove (&gsi, true); /* Fixup edges from head_bb. 
*/ be = BRANCH_EDGE (head_bb); fte = FALLTHRU_EDGE (head_bb); be->flags |= EDGE_FALSE_VALUE; fte->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE; basic_block body_bb = fte->dest; if (gimple_in_ssa_p (cfun)) { gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); offset = gimple_omp_continue_control_use (cont_stmt); offset_incr = gimple_omp_continue_control_def (cont_stmt); } else { offset = create_tmp_var (diff_type, ".offset"); offset_init = offset_incr = offset; } bound = create_tmp_var (TREE_TYPE (offset), ".bound"); /* Loop offset & bound go into head_bb. */ gsi = gsi_start_bb (head_bb); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET), dir, range, s, chunk_size, gwv, chunk_no); gimple_call_set_lhs (call, offset_init); gimple_set_location (call, loc); gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND), dir, range, s, chunk_size, gwv, offset_init); gimple_call_set_lhs (call, bound); gimple_set_location (call, loc); gsi_insert_after (&gsi, call, GSI_CONTINUE_LINKING); expr = build2 (cond_code, boolean_type_node, offset_init, bound); gsi_insert_after (&gsi, gimple_build_cond_empty (expr), GSI_CONTINUE_LINKING); /* V assignment goes into body_bb. */ if (!gimple_in_ssa_p (cfun)) { gsi = gsi_start_bb (body_bb); expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, offset)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (v, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); if (fd->collapse > 1 || fd->tiling) expand_oacc_collapse_vars (fd, false, &gsi, counts, v); if (fd->tiling) { /* Determine the range of the element loop -- usually simply the tile_size, but could be smaller if the final iteration of the outer loop is a partial tile. */ tree e_range = create_tmp_var (diff_type, ".e_range"); expr = build2 (MIN_EXPR, diff_type, build2 (MINUS_EXPR, diff_type, bound, offset), build2 (MULT_EXPR, diff_type, tile_size, element_s)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (e_range, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); /* Determine bound, offset & step of inner loop. */ e_bound = create_tmp_var (diff_type, ".e_bound"); e_offset = create_tmp_var (diff_type, ".e_offset"); e_step = create_tmp_var (diff_type, ".e_step"); /* Mark these as element loops. */ tree t, e_gwv = integer_minus_one_node; tree chunk = build_int_cst (diff_type, 0); /* Never chunked. */ t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_OFFSET); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range, element_s, chunk, e_gwv, chunk); gimple_call_set_lhs (call, e_offset); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_BOUND); call = gimple_build_call_internal (IFN_GOACC_LOOP, 7, t, dir, e_range, element_s, chunk, e_gwv, e_offset); gimple_call_set_lhs (call, e_bound); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); t = build_int_cst (integer_type_node, IFN_GOACC_LOOP_STEP); call = gimple_build_call_internal (IFN_GOACC_LOOP, 6, t, dir, e_range, element_s, chunk, e_gwv); gimple_call_set_lhs (call, e_step); gimple_set_location (call, loc); gsi_insert_before (&gsi, call, GSI_SAME_STMT); /* Add test and split block. 
*/ expr = build2 (cond_code, boolean_type_node, e_offset, e_bound); stmt = gimple_build_cond_empty (expr); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (body_bb, stmt); elem_body_bb = split->dest; if (cont_bb == body_bb) cont_bb = elem_body_bb; body_bb = split->src; split->flags ^= EDGE_FALLTHRU | EDGE_TRUE_VALUE; /* Add a dummy exit for the tiled block when cont_bb is missing. */ if (cont_bb == NULL) { edge e = make_edge (body_bb, exit_bb, EDGE_FALSE_VALUE); e->probability = profile_probability::even (); split->probability = profile_probability::even (); } /* Initialize the user's loop vars. */ gsi = gsi_start_bb (elem_body_bb); expand_oacc_collapse_vars (fd, true, &gsi, counts, e_offset); } } /* Loop increment goes into cont_bb. If this is not a loop, we will have spawned threads as if it was, and each one will execute one iteration. The specification is not explicit about whether such constructs are ill-formed or not, and they can occur, especially when noreturn routines are involved. */ if (cont_bb) { gsi = gsi_last_nondebug_bb (cont_bb); gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi)); loc = gimple_location (cont_stmt); if (fd->tiling) { /* Insert element loop increment and test. */ expr = build2 (PLUS_EXPR, diff_type, e_offset, e_step); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (e_offset, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = build2 (cond_code, boolean_type_node, e_offset, e_bound); stmt = gimple_build_cond_empty (expr); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (cont_bb, stmt); elem_cont_bb = split->src; cont_bb = split->dest; split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; split->probability = profile_probability::unlikely ().guessed (); edge latch_edge = make_edge (elem_cont_bb, elem_body_bb, EDGE_TRUE_VALUE); latch_edge->probability = profile_probability::likely ().guessed (); edge skip_edge = make_edge (body_bb, cont_bb, EDGE_FALSE_VALUE); skip_edge->probability = profile_probability::unlikely ().guessed (); edge loop_entry_edge = EDGE_SUCC (body_bb, 1 - skip_edge->dest_idx); loop_entry_edge->probability = profile_probability::likely ().guessed (); gsi = gsi_for_stmt (cont_stmt); } /* Increment offset. */ if (gimple_in_ssa_p (cfun)) expr = build2 (plus_code, iter_type, offset, fold_convert (plus_type, step)); else expr = build2 (PLUS_EXPR, diff_type, offset, step); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (offset_incr, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); expr = build2 (cond_code, boolean_type_node, offset_incr, bound); gsi_insert_before (&gsi, gimple_build_cond_empty (expr), GSI_SAME_STMT); /* Remove the GIMPLE_OMP_CONTINUE. */ gsi_remove (&gsi, true); /* Fixup edges from cont_bb. */ be = BRANCH_EDGE (cont_bb); fte = FALLTHRU_EDGE (cont_bb); be->flags |= EDGE_TRUE_VALUE; fte->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; if (chunking) { /* Split the beginning of exit_bb to make bottom_bb. We need to insert a nop at the start, because splitting is after a stmt, not before. */ gsi = gsi_start_bb (exit_bb); stmt = gimple_build_nop (); gsi_insert_before (&gsi, stmt, GSI_SAME_STMT); split = split_block (exit_bb, stmt); bottom_bb = split->src; exit_bb = split->dest; gsi = gsi_last_bb (bottom_bb); /* Chunk increment and test goes into bottom_bb. 
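This materializes the 'chunk_no++; if (chunk_no < chunk_max) goto head_bb;' step from the pseudocode in the function header comment.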
*/ expr = build2 (PLUS_EXPR, diff_type, chunk_no, build_int_cst (diff_type, 1)); ass = gimple_build_assign (chunk_no, expr); gsi_insert_after (&gsi, ass, GSI_CONTINUE_LINKING); /* Chunk test at end of bottom_bb. */ expr = build2 (LT_EXPR, boolean_type_node, chunk_no, chunk_max); gsi_insert_after (&gsi, gimple_build_cond_empty (expr), GSI_CONTINUE_LINKING); /* Fixup edges from bottom_bb. */ split->flags ^= EDGE_FALLTHRU | EDGE_FALSE_VALUE; split->probability = profile_probability::unlikely ().guessed (); edge latch_edge = make_edge (bottom_bb, head_bb, EDGE_TRUE_VALUE); latch_edge->probability = profile_probability::likely ().guessed (); } } gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); loc = gimple_location (gsi_stmt (gsi)); if (!gimple_in_ssa_p (cfun)) { /* Insert the final value of V, in case it is live. This is the value for the only thread that survives past the join. */ expr = fold_build2 (MINUS_EXPR, diff_type, range, dir); expr = fold_build2 (PLUS_EXPR, diff_type, expr, s); expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s); expr = fold_build2 (MULT_EXPR, diff_type, expr, s); expr = build2 (plus_code, iter_type, b, fold_convert (plus_type, expr)); expr = force_gimple_operand_gsi (&gsi, expr, false, NULL_TREE, true, GSI_SAME_STMT); ass = gimple_build_assign (v, expr); gsi_insert_before (&gsi, ass, GSI_SAME_STMT); } /* Remove the OMP_RETURN. */ gsi_remove (&gsi, true); if (cont_bb) { /* We now have one, two or three nested loops. Update the loop structures. */ struct loop *parent = entry_bb->loop_father; struct loop *body = body_bb->loop_father; if (chunking) { struct loop *chunk_loop = alloc_loop (); chunk_loop->header = head_bb; chunk_loop->latch = bottom_bb; add_loop (chunk_loop, parent); parent = chunk_loop; } else if (parent != body) { gcc_assert (body->header == body_bb); gcc_assert (body->latch == cont_bb || single_pred (body->latch) == cont_bb); parent = NULL; } if (parent) { struct loop *body_loop = alloc_loop (); body_loop->header = body_bb; body_loop->latch = cont_bb; add_loop (body_loop, parent); if (fd->tiling) { /* Insert tiling's element loop. */ struct loop *inner_loop = alloc_loop (); inner_loop->header = elem_body_bb; inner_loop->latch = elem_cont_bb; add_loop (inner_loop, body_loop); } } } } /* Expand the OMP loop defined by REGION. */ static void expand_omp_for (struct omp_region *region, gimple *inner_stmt) { struct omp_for_data fd; struct omp_for_data_loop *loops; loops = (struct omp_for_data_loop *) alloca (gimple_omp_for_collapse (last_stmt (region->entry)) * sizeof (struct omp_for_data_loop)); omp_extract_for_data (as_a <gomp_for *> (last_stmt (region->entry)), &fd, loops); region->sched_kind = fd.sched_kind; region->sched_modifiers = fd.sched_modifiers; gcc_assert (EDGE_COUNT (region->entry->succs) == 2); BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL; FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL; if (region->cont) { gcc_assert (EDGE_COUNT (region->cont->succs) == 2); BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL; FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL; } else /* If there isn't a continue, then this is a degenerate case where the introduction of abnormal edges during lowering will prevent original loops from being detected. Fix that up.
*/ loops_state_set (LOOPS_NEED_FIXUP); if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_SIMD) expand_omp_simd (region, &fd); else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_OACC_LOOP) { gcc_assert (!inner_stmt); expand_oacc_for (region, &fd); } else if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_TASKLOOP) { if (gimple_omp_for_combined_into_p (fd.for_stmt)) expand_omp_taskloop_for_inner (region, &fd, inner_stmt); else expand_omp_taskloop_for_outer (region, &fd, inner_stmt); } else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC && !fd.have_ordered) { if (fd.chunk_size == NULL) expand_omp_for_static_nochunk (region, &fd, inner_stmt); else expand_omp_for_static_chunk (region, &fd, inner_stmt); } else { int fn_index, start_ix, next_ix; gcc_assert (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_FOR); if (fd.chunk_size == NULL && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC) fd.chunk_size = integer_zero_node; gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO); switch (fd.sched_kind) { case OMP_CLAUSE_SCHEDULE_RUNTIME: fn_index = 3; break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: case OMP_CLAUSE_SCHEDULE_GUIDED: if ((fd.sched_modifiers & OMP_CLAUSE_SCHEDULE_NONMONOTONIC) && !fd.ordered && !fd.have_ordered) { fn_index = 3 + fd.sched_kind; break; } /* FALLTHRU */ default: fn_index = fd.sched_kind; break; } if (!fd.ordered) fn_index += fd.have_ordered * 6; if (fd.ordered) start_ix = ((int)BUILT_IN_GOMP_LOOP_DOACROSS_STATIC_START) + fn_index; else start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index; next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index; if (fd.iter_type == long_long_unsigned_type_node) { start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START - (int)BUILT_IN_GOMP_LOOP_STATIC_START); next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT); } expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix, (enum built_in_function) next_ix, inner_stmt); } if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_only_virtuals); } /* Expand code for an OpenMP sections directive. In pseudo code, we generate v = GOMP_sections_start (n); L0: switch (v) { case 0: goto L2; case 1: section 1; goto L1; case 2: ... case n: ... default: abort (); } L1: v = GOMP_sections_next (); goto L0; L2: reduction; If this is a combined parallel sections, replace the call to GOMP_sections_start with call to GOMP_sections_next. */ static void expand_omp_sections (struct omp_region *region) { tree t, u, vin = NULL, vmain, vnext, l2; unsigned len; basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb; gimple_stmt_iterator si, switch_si; gomp_sections *sections_stmt; gimple *stmt; gomp_continue *cont; edge_iterator ei; edge e; struct omp_region *inner; unsigned i, casei; bool exit_reachable = region->cont != NULL; gcc_assert (region->exit != NULL); entry_bb = region->entry; l0_bb = single_succ (entry_bb); l1_bb = region->cont; l2_bb = region->exit; if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb) l2 = gimple_block_label (l2_bb); else { /* This can happen if there are reductions. 
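Try the last successor of l0_bb first; failing that, scan all successors for one whose destination does not begin with a GIMPLE_OMP_SECTION and use that block's label as L2.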
*/ len = EDGE_COUNT (l0_bb->succs); gcc_assert (len > 0); e = EDGE_SUCC (l0_bb, len - 1); si = gsi_last_nondebug_bb (e->dest); l2 = NULL_TREE; if (gsi_end_p (si) || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) l2 = gimple_block_label (e->dest); else FOR_EACH_EDGE (e, ei, l0_bb->succs) { si = gsi_last_nondebug_bb (e->dest); if (gsi_end_p (si) || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION) { l2 = gimple_block_label (e->dest); break; } } } if (exit_reachable) default_bb = create_empty_bb (l1_bb->prev_bb); else default_bb = create_empty_bb (l0_bb); /* We will build a switch() with enough cases for all the GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work and a default case to abort if something goes wrong. */ len = EDGE_COUNT (l0_bb->succs); /* Use vec::quick_push on label_vec throughout, since we know the size in advance. */ auto_vec<tree> label_vec (len); /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the GIMPLE_OMP_SECTIONS statement. */ si = gsi_last_nondebug_bb (entry_bb); sections_stmt = as_a <gomp_sections *> (gsi_stmt (si)); gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS); vin = gimple_omp_sections_control (sections_stmt); if (!is_combined_parallel (region)) { /* If we are not inside a combined parallel+sections region, call GOMP_sections_start. */ t = build_int_cst (unsigned_type_node, len - 1); u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START); stmt = gimple_build_call (u, 1, t); } else { /* Otherwise, call GOMP_sections_next. */ u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT); stmt = gimple_build_call (u, 0); } gimple_call_set_lhs (stmt, vin); gsi_insert_after (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in L0_BB. */ switch_si = gsi_last_nondebug_bb (l0_bb); gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH); if (exit_reachable) { cont = as_a <gomp_continue *> (last_stmt (l1_bb)); gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE); vmain = gimple_omp_continue_control_use (cont); vnext = gimple_omp_continue_control_def (cont); } else { vmain = vin; vnext = NULL_TREE; } t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2); label_vec.quick_push (t); i = 1; /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */ for (inner = region->inner, casei = 1; inner; inner = inner->next, i++, casei++) { basic_block s_entry_bb, s_exit_bb; /* Skip optional reduction region. */ if (inner->type == GIMPLE_OMP_ATOMIC_LOAD) { --i; --casei; continue; } s_entry_bb = inner->entry; s_exit_bb = inner->exit; t = gimple_block_label (s_entry_bb); u = build_int_cst (unsigned_type_node, casei); u = build_case_label (u, NULL, t); label_vec.quick_push (u); si = gsi_last_nondebug_bb (s_entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION); gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si))); gsi_remove (&si, true); single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU; if (s_exit_bb == NULL) continue; si = gsi_last_nondebug_bb (s_exit_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN); gsi_remove (&si, true); single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU; } /* Error handling code goes in DEFAULT_BB. 
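The default case should never be reached at runtime; it is filled below with a call to __builtin_trap.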
*/ t = gimple_block_label (default_bb); u = build_case_label (NULL, NULL, t); make_edge (l0_bb, default_bb, 0); add_bb_to_loop (default_bb, current_loops->tree_root); stmt = gimple_build_switch (vmain, u, label_vec); gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT); gsi_remove (&switch_si, true); si = gsi_start_bb (default_bb); stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0); gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING); if (exit_reachable) { tree bfn_decl; /* Code to get the next section goes in L1_BB. */ si = gsi_last_nondebug_bb (l1_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE); bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT); stmt = gimple_build_call (bfn_decl, 0); gimple_call_set_lhs (stmt, vnext); gsi_insert_after (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU; } /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */ si = gsi_last_nondebug_bb (l2_bb); if (gimple_omp_return_nowait_p (gsi_stmt (si))) t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT); else if (gimple_omp_return_lhs (gsi_stmt (si))) t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL); else t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END); stmt = gimple_build_call (t, 0); if (gimple_omp_return_lhs (gsi_stmt (si))) gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si))); gsi_insert_after (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb); } /* Expand code for an OpenMP single directive. We've already expanded much of the code; here we simply place the GOMP_barrier call. */ static void expand_omp_single (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE); gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; si = gsi_last_nondebug_bb (exit_bb); if (!gimple_omp_return_nowait_p (gsi_stmt (si))) { tree t = gimple_omp_return_lhs (gsi_stmt (si)); gsi_insert_after (&si, omp_build_barrier (t), GSI_SAME_STMT); } gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } /* Generic expansion for OpenMP synchronization directives: master, taskgroup, ordered, critical and teams. All we need to do here is remove the entry and exit markers for REGION. */ static void expand_omp_synch (struct omp_region *region) { basic_block entry_bb, exit_bb; gimple_stmt_iterator si; entry_bb = region->entry; exit_bb = region->exit; si = gsi_last_nondebug_bb (entry_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS); gsi_remove (&si, true); single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; if (exit_bb) { si = gsi_last_nondebug_bb (exit_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN); gsi_remove (&si, true); single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU; } } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile load.
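For example, an atomic read of a 4-byte int becomes, roughly, loaded_val = __atomic_load_4 (addr, MEMMODEL_RELAXED), using MEMMODEL_SEQ_CST instead when the seq_cst clause was given.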
*/ static bool expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val, int index) { enum built_in_function tmpbase; gimple_stmt_iterator gsi; basic_block store_bb; location_t loc; gimple *stmt; tree decl, call, type, itype; gsi = gsi_last_nondebug_bb (load_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD); loc = gimple_location (stmt); /* ??? If the target does not implement atomic_load_optab[mode], and mode is smaller than word size, then expand_atomic_load assumes that the load is atomic. We could avoid the builtin entirely in this case. */ tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; type = TREE_TYPE (loaded_val); itype = TREE_TYPE (TREE_TYPE (decl)); call = build_call_expr_loc (loc, decl, 2, addr, build_int_cst (NULL, gimple_omp_atomic_seq_cst_p (stmt) ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED)); if (!useless_type_conversion_p (type, itype)) call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call); force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); store_bb = single_succ (load_bb); gsi = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a normal volatile store. */ static bool expand_omp_atomic_store (basic_block load_bb, tree addr, tree loaded_val, tree stored_val, int index) { enum built_in_function tmpbase; gimple_stmt_iterator gsi; basic_block store_bb = single_succ (load_bb); location_t loc; gimple *stmt; tree decl, call, type, itype; machine_mode imode; bool exchange; gsi = gsi_last_nondebug_bb (load_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD); /* If the load value is needed, then this isn't a store but an exchange. */ exchange = gimple_omp_atomic_need_value_p (stmt); gsi = gsi_last_nondebug_bb (store_bb); stmt = gsi_stmt (gsi); gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE); loc = gimple_location (stmt); /* ??? If the target does not implement atomic_store_optab[mode], and mode is smaller than word size, then expand_atomic_store assumes that the store is atomic. We could avoid the builtin entirely in this case. */ tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N); tmpbase = (enum built_in_function) ((int) tmpbase + index + 1); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; type = TREE_TYPE (stored_val); /* Dig out the type of the function's second argument. */ itype = TREE_TYPE (decl); itype = TYPE_ARG_TYPES (itype); itype = TREE_CHAIN (itype); itype = TREE_VALUE (itype); imode = TYPE_MODE (itype); if (exchange && !can_atomic_exchange_p (imode, true)) return false; if (!useless_type_conversion_p (itype, type)) stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val); call = build_call_expr_loc (loc, decl, 3, addr, stored_val, build_int_cst (NULL, gimple_omp_atomic_seq_cst_p (stmt) ? 
MEMMODEL_SEQ_CST : MEMMODEL_RELAXED)); if (exchange) { if (!useless_type_conversion_p (type, itype)) call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call); } force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */ gsi = gsi_last_nondebug_bb (load_bb); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Attempt to implement the atomic operation as a __atomic_fetch_op builtin. INDEX is log2 of the size of the data type, and thus usable to find the index of the builtin decl. Returns false if the expression is not of the proper form. */ static bool expand_omp_atomic_fetch_op (basic_block load_bb, tree addr, tree loaded_val, tree stored_val, int index) { enum built_in_function oldbase, newbase, tmpbase; tree decl, itype, call; tree lhs, rhs; basic_block store_bb = single_succ (load_bb); gimple_stmt_iterator gsi; gimple *stmt; location_t loc; enum tree_code code; bool need_old, need_new; machine_mode imode; bool seq_cst; /* We expect to find the following sequences: load_bb: GIMPLE_OMP_ATOMIC_LOAD (tmp, mem) store_bb: val = tmp OP something; (or: something OP tmp) GIMPLE_OMP_STORE (val) ???FIXME: Allow a more flexible sequence. Perhaps use data flow to pick the statements. */ gsi = gsi_after_labels (store_bb); stmt = gsi_stmt (gsi); if (is_gimple_debug (stmt)) { gsi_next_nondebug (&gsi); if (gsi_end_p (gsi)) return false; stmt = gsi_stmt (gsi); } loc = gimple_location (stmt); if (!is_gimple_assign (stmt)) return false; gsi_next_nondebug (&gsi); if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE) return false; need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi)); need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb)); seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb)); gcc_checking_assert (!need_old || !need_new); if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0)) return false; /* Check for one of the supported fetch-op operations. */ code = gimple_assign_rhs_code (stmt); switch (code) { case PLUS_EXPR: case POINTER_PLUS_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N; newbase = BUILT_IN_ATOMIC_ADD_FETCH_N; break; case MINUS_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N; newbase = BUILT_IN_ATOMIC_SUB_FETCH_N; break; case BIT_AND_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_AND_N; newbase = BUILT_IN_ATOMIC_AND_FETCH_N; break; case BIT_IOR_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_OR_N; newbase = BUILT_IN_ATOMIC_OR_FETCH_N; break; case BIT_XOR_EXPR: oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N; newbase = BUILT_IN_ATOMIC_XOR_FETCH_N; break; default: return false; } /* Make sure the expression is of the proper form. */ if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0)) rhs = gimple_assign_rhs2 (stmt); else if (commutative_tree_code (gimple_assign_rhs_code (stmt)) && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0)) rhs = gimple_assign_rhs1 (stmt); else return false; tmpbase = ((enum built_in_function) ((need_new ? 
newbase : oldbase) + index + 1)); decl = builtin_decl_explicit (tmpbase); if (decl == NULL_TREE) return false; itype = TREE_TYPE (TREE_TYPE (decl)); imode = TYPE_MODE (itype); /* We could test all of the various optabs involved, but the fact of the matter is that (with the exception of i486 vs i586 and xadd) all targets that support any atomic operation optab also implement compare-and-swap. Let optabs.c take care of expanding any compare-and-swap loop. */ if (!can_compare_and_swap_p (imode, true) || !can_atomic_load_p (imode)) return false; gsi = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD); /* OpenMP does not imply any barrier-like semantics on its atomic ops. It only requires that the operation happen atomically. Thus we can use the RELAXED memory model. */ call = build_call_expr_loc (loc, decl, 3, addr, fold_convert_loc (loc, itype, rhs), build_int_cst (NULL, seq_cst ? MEMMODEL_SEQ_CST : MEMMODEL_RELAXED)); if (need_old || need_new) { lhs = need_old ? loaded_val : stored_val; call = fold_convert_loc (loc, TREE_TYPE (lhs), call); call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call); } else call = fold_convert_loc (loc, void_type_node, call); force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE); gsi_remove (&gsi, true); gsi = gsi_last_nondebug_bb (store_bb); stmt = gsi_stmt (gsi); gsi_remove (&gsi, true); if (gimple_in_ssa_p (cfun)) { release_defs (stmt); update_ssa (TODO_update_ssa_no_phi); } return true; } /* A subroutine of expand_omp_atomic. Implement the atomic operation as: oldval = *addr; repeat: newval = rhs; // with oldval replacing *addr in rhs oldval = __sync_val_compare_and_swap (addr, oldval, newval); if (oldval != newval) goto repeat; INDEX is log2 of the size of the data type, and thus usable to find the index of the builtin decl. */ static bool expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val, int index) { tree loadedi, storedi, initial, new_storedi, old_vali; tree type, itype, cmpxchg, iaddr, atype; gimple_stmt_iterator si; basic_block loop_header = single_succ (load_bb); gimple *phi, *stmt; edge e; enum built_in_function fncode; /* ??? We need a non-pointer interface to __atomic_compare_exchange in order to use the RELAXED memory model effectively. */ fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1); cmpxchg = builtin_decl_explicit (fncode); if (cmpxchg == NULL_TREE) return false; type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val)); atype = type; itype = TREE_TYPE (TREE_TYPE (cmpxchg)); if (!can_compare_and_swap_p (TYPE_MODE (itype), true) || !can_atomic_load_p (TYPE_MODE (itype))) return false; /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */ si = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); /* For floating-point values, we'll need to view-convert them to integers so that we can perform the atomic compare and swap. Simplify the following code by always setting up the "i"ntegral variables.
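For example, a 'double' ends up accessed through a pointer to the 8-byte integer type of __sync_val_compare_and_swap_8, with VIEW_CONVERT_EXPRs translating between the two representations.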
*/ if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type)) { tree iaddr_val; iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode, true)); atype = itype; iaddr_val = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (iaddr), addr), false, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (iaddr, iaddr_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); loadedi = create_tmp_var (itype); if (gimple_in_ssa_p (cfun)) loadedi = make_ssa_name (loadedi); } else { iaddr = addr; loadedi = loaded_val; } fncode = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1); tree loaddecl = builtin_decl_explicit (fncode); if (loaddecl) initial = fold_convert (atype, build_call_expr (loaddecl, 2, iaddr, build_int_cst (NULL_TREE, MEMMODEL_RELAXED))); else { tree off = build_int_cst (build_pointer_type_for_mode (atype, ptr_mode, true), 0); initial = build2 (MEM_REF, atype, iaddr, off); } initial = force_gimple_operand_gsi (&si, initial, true, NULL_TREE, true, GSI_SAME_STMT); /* Move the value to the LOADEDI temporary. */ if (gimple_in_ssa_p (cfun)) { gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header))); phi = create_phi_node (loadedi, loop_header); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)), initial); } else gsi_insert_before (&si, gimple_build_assign (loadedi, initial), GSI_SAME_STMT); if (loadedi != loaded_val) { gimple_stmt_iterator gsi2; tree x; x = build1 (VIEW_CONVERT_EXPR, type, loadedi); gsi2 = gsi_start_bb (loop_header); if (gimple_in_ssa_p (cfun)) { gassign *stmt; x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); stmt = gimple_build_assign (loaded_val, x); gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT); } else { x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x); force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE, true, GSI_SAME_STMT); } } gsi_remove (&si, true); si = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); if (iaddr == addr) storedi = stored_val; else storedi = force_gimple_operand_gsi (&si, build1 (VIEW_CONVERT_EXPR, itype, stored_val), true, NULL_TREE, true, GSI_SAME_STMT); /* Build the compare&swap statement. */ new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi); new_storedi = force_gimple_operand_gsi (&si, fold_convert (TREE_TYPE (loadedi), new_storedi), true, NULL_TREE, true, GSI_SAME_STMT); if (gimple_in_ssa_p (cfun)) old_vali = loadedi; else { old_vali = create_tmp_var (TREE_TYPE (loadedi)); stmt = gimple_build_assign (old_vali, loadedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); stmt = gimple_build_assign (loadedi, new_storedi); gsi_insert_before (&si, stmt, GSI_SAME_STMT); } /* Note that we always perform the comparison as an integer, even for floating point. This allows the atomic operation to properly succeed even with NaNs and -0.0. */ tree ne = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali); stmt = gimple_build_cond_empty (ne); gsi_insert_before (&si, stmt, GSI_SAME_STMT); /* Update cfg. */ e = single_succ_edge (store_bb); e->flags &= ~EDGE_FALLTHRU; e->flags |= EDGE_FALSE_VALUE; /* Expect no looping. */ e->probability = profile_probability::guessed_always (); e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE); e->probability = profile_probability::guessed_never (); /* Copy the new value to loadedi (we already did that before the condition if we are not in SSA). 
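In SSA form that copy is expressed by making new_storedi the PHI argument on the back edge created just above.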
*/ if (gimple_in_ssa_p (cfun)) { phi = gimple_seq_first_stmt (phi_nodes (loop_header)); SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi); } /* Remove GIMPLE_OMP_ATOMIC_STORE. */ gsi_remove (&si, true); struct loop *loop = alloc_loop (); loop->header = loop_header; loop->latch = store_bb; add_loop (loop, loop_header->loop_father); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* A subroutine of expand_omp_atomic. Implement the atomic operation as: GOMP_atomic_start (); *addr = rhs; GOMP_atomic_end (); The result is not globally atomic, but works so long as all parallel references are within #pragma omp atomic directives. According to responses received from omp@openmp.org, this appears to be within spec, which makes sense, since that's how several other compilers handle this situation as well. LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're expanding. STORED_VAL is the operand of the matching GIMPLE_OMP_ATOMIC_STORE. We replace GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with loaded_val = *addr; and replace GIMPLE_OMP_ATOMIC_STORE (stored_val) with *addr = stored_val; */ static bool expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb, tree addr, tree loaded_val, tree stored_val) { gimple_stmt_iterator si; gassign *stmt; tree t; si = gsi_last_nondebug_bb (load_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); tree mem = build_simple_mem_ref (addr); TREE_TYPE (mem) = TREE_TYPE (loaded_val); TREE_OPERAND (mem, 1) = fold_convert (build_pointer_type_for_mode (TREE_TYPE (mem), ptr_mode, true), TREE_OPERAND (mem, 1)); stmt = gimple_build_assign (loaded_val, mem); gsi_insert_before (&si, stmt, GSI_SAME_STMT); gsi_remove (&si, true); si = gsi_last_nondebug_bb (store_bb); gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE); stmt = gimple_build_assign (unshare_expr (mem), stored_val); gsi_insert_before (&si, stmt, GSI_SAME_STMT); t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END); t = build_call_expr (t, 0); force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT); gsi_remove (&si, true); if (gimple_in_ssa_p (cfun)) update_ssa (TODO_update_ssa_no_phi); return true; } /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand using expand_omp_atomic_fetch_op. If that fails, we try to call expand_omp_atomic_pipeline, and if that fails too, the ultimate fallback is wrapping the operation in a mutex (expand_omp_atomic_mutex). REGION is the atomic region built by build_omp_regions_1(). */ static void expand_omp_atomic (struct omp_region *region) { basic_block load_bb = region->entry, store_bb = region->exit; gomp_atomic_load *load = as_a <gomp_atomic_load *> (last_stmt (load_bb)); gomp_atomic_store *store = as_a <gomp_atomic_store *> (last_stmt (store_bb)); tree loaded_val = gimple_omp_atomic_load_lhs (load); tree addr = gimple_omp_atomic_load_rhs (load); tree stored_val = gimple_omp_atomic_store_val (store); tree type = TYPE_MAIN_VARIANT (TREE_TYPE (loaded_val)); HOST_WIDE_INT index; /* Make sure the type is one of the supported sizes. */ index = tree_to_uhwi (TYPE_SIZE_UNIT (type)); index = exact_log2 (index); if (index >= 0 && index <= 4) { unsigned int align = TYPE_ALIGN_UNIT (type); /* __sync builtins require strict data alignment. */ if (exact_log2 (align) >= index) { /* Atomic load.
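Only attempted when the value is merely read (loaded_val == stored_val) and fits in a word.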
*/ scalar_mode smode; if (loaded_val == stored_val && (is_int_mode (TYPE_MODE (type), &smode) || is_float_mode (TYPE_MODE (type), &smode)) && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD && expand_omp_atomic_load (load_bb, addr, loaded_val, index)) return; /* Atomic store. */ if ((is_int_mode (TYPE_MODE (type), &smode) || is_float_mode (TYPE_MODE (type), &smode)) && GET_MODE_BITSIZE (smode) <= BITS_PER_WORD && store_bb == single_succ (load_bb) && first_stmt (store_bb) == store && expand_omp_atomic_store (load_bb, addr, loaded_val, stored_val, index)) return; /* When possible, use specialized atomic update functions. */ if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)) && store_bb == single_succ (load_bb) && expand_omp_atomic_fetch_op (load_bb, addr, loaded_val, stored_val, index)) return; /* If we don't have specialized __sync builtins, try to implement as a compare and swap loop. */ if (expand_omp_atomic_pipeline (load_bb, store_bb, addr, loaded_val, stored_val, index)) return; } } /* The ultimate fallback is wrapping the operation in a mutex. */ expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val); } /* Mark the loops inside the kernels region starting at REGION_ENTRY and ending at REGION_EXIT. */ static void mark_loops_in_oacc_kernels_region (basic_block region_entry, basic_block region_exit) { struct loop *outer = region_entry->loop_father; gcc_assert (region_exit == NULL || outer == region_exit->loop_father); /* Don't parallelize the kernels region if it contains more than one outer loop. */ unsigned int nr_outer_loops = 0; struct loop *single_outer = NULL; for (struct loop *loop = outer->inner; loop != NULL; loop = loop->next) { gcc_assert (loop_outer (loop) == outer); if (!dominated_by_p (CDI_DOMINATORS, loop->header, region_entry)) continue; if (region_exit != NULL && dominated_by_p (CDI_DOMINATORS, loop->header, region_exit)) continue; nr_outer_loops++; single_outer = loop; } if (nr_outer_loops != 1) return; for (struct loop *loop = single_outer->inner; loop != NULL; loop = loop->inner) if (loop->next) return; /* Mark the loops in the region. */ for (struct loop *loop = single_outer; loop != NULL; loop = loop->inner) loop->in_oacc_kernels_region = true; } /* Types used to pass grid and workgroup sizes to kernel invocation. */ struct GTY(()) grid_launch_attributes_trees { tree kernel_dim_array_type; tree kernel_lattrs_dimnum_decl; tree kernel_lattrs_grid_decl; tree kernel_lattrs_group_decl; tree kernel_launch_attributes_type; }; static GTY(()) struct grid_launch_attributes_trees *grid_attr_trees; /* Create types used to pass kernel launch attributes to target.
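The record built below, __gomp_kernel_launch_attributes, consists of a uint32 'ndim' field followed by two three-element uint32 arrays, 'grid_size' and 'group_size'.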
*/ static void grid_create_kernel_launch_attr_types (void) { if (grid_attr_trees) return; grid_attr_trees = ggc_alloc <grid_launch_attributes_trees> (); tree dim_arr_index_type = build_index_type (build_int_cst (integer_type_node, 2)); grid_attr_trees->kernel_dim_array_type = build_array_type (uint32_type_node, dim_arr_index_type); grid_attr_trees->kernel_launch_attributes_type = make_node (RECORD_TYPE); grid_attr_trees->kernel_lattrs_dimnum_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("ndim"), uint32_type_node); DECL_CHAIN (grid_attr_trees->kernel_lattrs_dimnum_decl) = NULL_TREE; grid_attr_trees->kernel_lattrs_grid_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("grid_size"), grid_attr_trees->kernel_dim_array_type); DECL_CHAIN (grid_attr_trees->kernel_lattrs_grid_decl) = grid_attr_trees->kernel_lattrs_dimnum_decl; grid_attr_trees->kernel_lattrs_group_decl = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("group_size"), grid_attr_trees->kernel_dim_array_type); DECL_CHAIN (grid_attr_trees->kernel_lattrs_group_decl) = grid_attr_trees->kernel_lattrs_grid_decl; finish_builtin_struct (grid_attr_trees->kernel_launch_attributes_type, "__gomp_kernel_launch_attributes", grid_attr_trees->kernel_lattrs_group_decl, NULL_TREE); } /* Insert before the current statement in GSI a store of VALUE to INDEX of array (of type kernel_dim_array_type) FLD_DECL of RANGE_VAR. VALUE must be of type uint32_type_node. */ static void grid_insert_store_range_dim (gimple_stmt_iterator *gsi, tree range_var, tree fld_decl, int index, tree value) { tree ref = build4 (ARRAY_REF, uint32_type_node, build3 (COMPONENT_REF, grid_attr_trees->kernel_dim_array_type, range_var, fld_decl, NULL_TREE), build_int_cst (integer_type_node, index), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, value), GSI_SAME_STMT); } /* Return a tree representation of a pointer to a structure with grid and work-group size information. Statements filling that information will be inserted before GSI, TGT_STMT is the target statement which has the necessary information in it. */ static tree grid_get_kernel_launch_attributes (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) { grid_create_kernel_launch_attr_types (); tree lattrs = create_tmp_var (grid_attr_trees->kernel_launch_attributes_type, "__kernel_launch_attrs"); unsigned max_dim = 0; for (tree clause = gimple_omp_target_clauses (tgt_stmt); clause; clause = OMP_CLAUSE_CHAIN (clause)) { if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE__GRIDDIM_) continue; unsigned dim = OMP_CLAUSE__GRIDDIM__DIMENSION (clause); max_dim = MAX (dim, max_dim); grid_insert_store_range_dim (gsi, lattrs, grid_attr_trees->kernel_lattrs_grid_decl, dim, OMP_CLAUSE__GRIDDIM__SIZE (clause)); grid_insert_store_range_dim (gsi, lattrs, grid_attr_trees->kernel_lattrs_group_decl, dim, OMP_CLAUSE__GRIDDIM__GROUP (clause)); } tree dimref = build3 (COMPONENT_REF, uint32_type_node, lattrs, grid_attr_trees->kernel_lattrs_dimnum_decl, NULL_TREE); gcc_checking_assert (max_dim <= 2); tree dimensions = build_int_cstu (uint32_type_node, max_dim + 1); gsi_insert_before (gsi, gimple_build_assign (dimref, dimensions), GSI_SAME_STMT); TREE_ADDRESSABLE (lattrs) = 1; return build_fold_addr_expr (lattrs); } /* Build target argument identifier from the DEVICE identifier, value identifier ID and whether the element also has a SUBSEQUENT_PARAM. 
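The identifier is assembled by OR-ing together the DEVICE constant, optionally the GOMP_TARGET_ARG_SUBSEQUENT_PARAM flag, and ID.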
*/ static tree get_target_argument_identifier_1 (int device, bool subsequent_param, int id) { tree t = build_int_cst (integer_type_node, device); if (subsequent_param) t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, build_int_cst (integer_type_node, GOMP_TARGET_ARG_SUBSEQUENT_PARAM)); t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, build_int_cst (integer_type_node, id)); return t; } /* Like above but return it in a type that can be directly stored as an element of the argument array. */ static tree get_target_argument_identifier (int device, bool subsequent_param, int id) { tree t = get_target_argument_identifier_1 (device, subsequent_param, id); return fold_convert (ptr_type_node, t); } /* Return a target argument consisting of DEVICE identifier, value identifier ID, and the actual VALUE. */ static tree get_target_argument_value (gimple_stmt_iterator *gsi, int device, int id, tree value) { tree t = fold_build2 (LSHIFT_EXPR, integer_type_node, fold_convert (integer_type_node, value), build_int_cst (unsigned_type_node, GOMP_TARGET_ARG_VALUE_SHIFT)); t = fold_build2 (BIT_IOR_EXPR, integer_type_node, t, get_target_argument_identifier_1 (device, false, id)); t = fold_convert (ptr_type_node, t); return force_gimple_operand_gsi (gsi, t, true, NULL, true, GSI_SAME_STMT); } /* If VALUE is an integer constant greater than -2^15 and smaller than 2^15, push one argument to ARGS with both the DEVICE, ID and VALUE embedded in it; otherwise push an identifier (with DEVICE and ID) and the VALUE in two arguments. */ static void push_target_argument_according_to_value (gimple_stmt_iterator *gsi, int device, int id, tree value, vec <tree> *args) { if (tree_fits_shwi_p (value) && tree_to_shwi (value) > -(1 << 15) && tree_to_shwi (value) < (1 << 15)) args->quick_push (get_target_argument_value (gsi, device, id, value)); else { args->quick_push (get_target_argument_identifier (device, true, id)); value = fold_convert (ptr_type_node, value); value = force_gimple_operand_gsi (gsi, value, true, NULL, true, GSI_SAME_STMT); args->quick_push (value); } } /* Create an array of arguments that is then passed to GOMP_target. */ static tree get_target_arguments (gimple_stmt_iterator *gsi, gomp_target *tgt_stmt) { auto_vec <tree, 6> args; tree clauses = gimple_omp_target_clauses (tgt_stmt); tree t, c = omp_find_clause (clauses, OMP_CLAUSE_NUM_TEAMS); if (c) t = OMP_CLAUSE_NUM_TEAMS_EXPR (c); else t = integer_minus_one_node; push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL, GOMP_TARGET_ARG_NUM_TEAMS, t, &args); c = omp_find_clause (clauses, OMP_CLAUSE_THREAD_LIMIT); if (c) t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); else t = integer_minus_one_node; push_target_argument_according_to_value (gsi, GOMP_TARGET_ARG_DEVICE_ALL, GOMP_TARGET_ARG_THREAD_LIMIT, t, &args); /* Add HSA-specific grid sizes, if available. */ if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)) { int id = GOMP_TARGET_ARG_HSA_KERNEL_ATTRIBUTES; t = get_target_argument_identifier (GOMP_DEVICE_HSA, true, id); args.quick_push (t); args.quick_push (grid_get_kernel_launch_attributes (gsi, tgt_stmt)); } /* Produce more, perhaps device specific, arguments here.
*/ tree argarray = create_tmp_var (build_array_type_nelts (ptr_type_node, args.length () + 1), ".omp_target_args"); for (unsigned i = 0; i < args.length (); i++) { tree ref = build4 (ARRAY_REF, ptr_type_node, argarray, build_int_cst (integer_type_node, i), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, args[i]), GSI_SAME_STMT); } tree ref = build4 (ARRAY_REF, ptr_type_node, argarray, build_int_cst (integer_type_node, args.length ()), NULL_TREE, NULL_TREE); gsi_insert_before (gsi, gimple_build_assign (ref, null_pointer_node), GSI_SAME_STMT); TREE_ADDRESSABLE (argarray) = 1; return build_fold_addr_expr (argarray); } /* Expand the GIMPLE_OMP_TARGET starting at REGION. */ static void expand_omp_target (struct omp_region *region) { basic_block entry_bb, exit_bb, new_bb; struct function *child_cfun; tree child_fn, block, t; gimple_stmt_iterator gsi; gomp_target *entry_stmt; gimple *stmt; edge e; bool offloaded, data_region; entry_stmt = as_a <gomp_target *> (last_stmt (region->entry)); new_bb = region->entry; offloaded = is_gimple_omp_offloaded (entry_stmt); switch (gimple_omp_target_kind (entry_stmt)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: data_region = false; break; case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: data_region = true; break; default: gcc_unreachable (); } child_fn = NULL_TREE; child_cfun = NULL; if (offloaded) { child_fn = gimple_omp_target_child_fn (entry_stmt); child_cfun = DECL_STRUCT_FUNCTION (child_fn); } /* Supported by expand_omp_taskreg, but not here. */ if (child_cfun != NULL) gcc_checking_assert (!child_cfun->cfg); gcc_checking_assert (!gimple_in_ssa_p (cfun)); entry_bb = region->entry; exit_bb = region->exit; if (gimple_omp_target_kind (entry_stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS) { mark_loops_in_oacc_kernels_region (region->entry, region->exit); /* Further down, both OpenACC kernels and OpenACC parallel constructs will be mapped to BUILT_IN_GOACC_PARALLEL, and to distinguish the two, there is an "oacc kernels" attribute set for OpenACC kernels. */ DECL_ATTRIBUTES (child_fn) = tree_cons (get_identifier ("oacc kernels"), NULL_TREE, DECL_ATTRIBUTES (child_fn)); } if (offloaded) { unsigned srcidx, dstidx, num; /* If the offloading region needs data sent from the parent function, then the very first statement (except possible tree profile counter updates) of the offloading body is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since &.OMP_DATA_O is passed as an argument to the child function, we need to replace it with the argument as seen by the child function. In most cases, this will end up being the identity assignment .OMP_DATA_I = .OMP_DATA_I. However, if the offloading body had a function call that has been inlined, the original PARM_DECL .OMP_DATA_I may have been converted into a different local variable, in which case we need to keep the assignment.
*/ tree data_arg = gimple_omp_target_data_arg (entry_stmt); if (data_arg) { basic_block entry_succ_bb = single_succ (entry_bb); gimple_stmt_iterator gsi; tree arg; gimple *tgtcopy_stmt = NULL; tree sender = TREE_VEC_ELT (data_arg, 0); for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi)) { gcc_assert (!gsi_end_p (gsi)); stmt = gsi_stmt (gsi); if (gimple_code (stmt) != GIMPLE_ASSIGN) continue; if (gimple_num_ops (stmt) == 2) { tree arg = gimple_assign_rhs1 (stmt); /* We're ignoring the subcode because we're effectively doing a STRIP_NOPS. */ if (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == sender) { tgtcopy_stmt = stmt; break; } } } gcc_assert (tgtcopy_stmt != NULL); arg = DECL_ARGUMENTS (child_fn); gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg); gsi_remove (&gsi, true); } /* Declare local variables needed in CHILD_CFUN. */ block = DECL_INITIAL (child_fn); BLOCK_VARS (block) = vec2chain (child_cfun->local_decls); /* The gimplifier could record temporaries in the offloading block rather than in containing function's local_decls chain, which would mean cgraph missed finalizing them. Do it now. */ for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t)) if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t)) varpool_node::finalize_decl (t); DECL_SAVED_TREE (child_fn) = NULL; /* We'll create a CFG for child_fn, so no gimple body is needed. */ gimple_set_body (child_fn, NULL); TREE_USED (block) = 1; /* Reset DECL_CONTEXT on function arguments. */ for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = child_fn; /* Split ENTRY_BB at GIMPLE_*, so that it can be moved to the child function. */ gsi = gsi_last_nondebug_bb (entry_bb); stmt = gsi_stmt (gsi); gcc_assert (stmt && gimple_code (stmt) == gimple_code (entry_stmt)); e = split_block (entry_bb, stmt); gsi_remove (&gsi, true); entry_bb = e->dest; single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU; /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */ if (exit_bb) { gsi = gsi_last_nondebug_bb (exit_bb); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); } /* Make sure to generate early debug for the function before outlining anything. */ if (! gimple_in_ssa_p (cfun)) (*debug_hooks->early_global_decl) (cfun->decl); /* Move the offloading region into CHILD_CFUN. */ block = gimple_block (entry_stmt); new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block); if (exit_bb) single_succ_edge (new_bb)->flags = EDGE_FALLTHRU; /* When the OMP expansion process cannot guarantee an up-to-date loop tree arrange for the child function to fixup loops. */ if (loops_state_satisfies_p (LOOPS_NEED_FIXUP)) child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP; /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */ num = vec_safe_length (child_cfun->local_decls); for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++) { t = (*child_cfun->local_decls)[srcidx]; if (DECL_CONTEXT (t) == cfun->decl) continue; if (srcidx != dstidx) (*child_cfun->local_decls)[dstidx] = t; dstidx++; } if (dstidx != num) vec_safe_truncate (child_cfun->local_decls, dstidx); /* Inform the callgraph about the new function. 
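Besides registering child_fn with the cgraph, record it in offload_funcs when offloading is enabled so that the offload tables refer to it.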
*/ child_cfun->curr_properties = cfun->curr_properties; child_cfun->has_simduid_loops |= cfun->has_simduid_loops; child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops; cgraph_node *node = cgraph_node::get_create (child_fn); node->parallelized_function = 1; cgraph_node::add_new_function (child_fn, true); /* Add the new function to the offload table. */ if (ENABLE_OFFLOADING) { if (in_lto_p) DECL_PRESERVE_P (child_fn) = 1; vec_safe_push (offload_funcs, child_fn); } bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl) && !DECL_ASSEMBLER_NAME_SET_P (child_fn); /* Fix the callgraph edges for child_cfun. Those for cfun will be fixed in a following pass. */ push_cfun (child_cfun); if (need_asm) assign_assembler_name_if_needed (child_fn); cgraph_edge::rebuild_edges (); /* Some EH regions might become dead, see PR34608. If pass_cleanup_cfg isn't the first pass to happen with the new child, these dead EH edges might cause problems. Clean them up now. */ if (flag_exceptions) { basic_block bb; bool changed = false; FOR_EACH_BB_FN (bb, cfun) changed |= gimple_purge_dead_eh_edges (bb); if (changed) cleanup_tree_cfg (); } if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); pop_cfun (); if (dump_file && !gimple_in_ssa_p (cfun)) { omp_any_child_fn_dumped = true; dump_function_header (dump_file, child_fn, dump_flags); dump_function_to_file (child_fn, dump_file, dump_flags); } } /* Emit a library call to launch the offloading region, or do data transfers. */ tree t1, t2, t3, t4, device, cond, depend, c, clauses; enum built_in_function start_ix; location_t clause_loc; unsigned int flags_i = 0; switch (gimple_omp_target_kind (entry_stmt)) { case GF_OMP_TARGET_KIND_REGION: start_ix = BUILT_IN_GOMP_TARGET; break; case GF_OMP_TARGET_KIND_DATA: start_ix = BUILT_IN_GOMP_TARGET_DATA; break; case GF_OMP_TARGET_KIND_UPDATE: start_ix = BUILT_IN_GOMP_TARGET_UPDATE; break; case GF_OMP_TARGET_KIND_ENTER_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_EXIT_DATA: start_ix = BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA; flags_i |= GOMP_TARGET_FLAG_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_PARALLEL: start_ix = BUILT_IN_GOACC_PARALLEL; break; case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: start_ix = BUILT_IN_GOACC_DATA_START; break; case GF_OMP_TARGET_KIND_OACC_UPDATE: start_ix = BUILT_IN_GOACC_UPDATE; break; case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: start_ix = BUILT_IN_GOACC_ENTER_EXIT_DATA; break; case GF_OMP_TARGET_KIND_OACC_DECLARE: start_ix = BUILT_IN_GOACC_DECLARE; break; default: gcc_unreachable (); } clauses = gimple_omp_target_clauses (entry_stmt); /* By default, the value of DEVICE is GOMP_DEVICE_ICV (let runtime library choose) and there is no conditional. */ cond = NULL_TREE; device = build_int_cst (integer_type_node, GOMP_DEVICE_ICV); c = omp_find_clause (clauses, OMP_CLAUSE_IF); if (c) cond = OMP_CLAUSE_IF_EXPR (c); c = omp_find_clause (clauses, OMP_CLAUSE_DEVICE); if (c) { /* Even if we pass it to all library function calls, it is currently only defined/used for the OpenMP target ones. 
*/ gcc_checking_assert (start_ix == BUILT_IN_GOMP_TARGET || start_ix == BUILT_IN_GOMP_TARGET_DATA || start_ix == BUILT_IN_GOMP_TARGET_UPDATE || start_ix == BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA); device = OMP_CLAUSE_DEVICE_ID (c); clause_loc = OMP_CLAUSE_LOCATION (c); } else clause_loc = gimple_location (entry_stmt); c = omp_find_clause (clauses, OMP_CLAUSE_NOWAIT); if (c) flags_i |= GOMP_TARGET_FLAG_NOWAIT; /* Ensure 'device' is of the correct type. */ device = fold_convert_loc (clause_loc, integer_type_node, device); /* If we found the clause 'if (cond)', build (cond ? device : GOMP_DEVICE_HOST_FALLBACK). */ if (cond) { cond = gimple_boolify (cond); basic_block cond_bb, then_bb, else_bb; edge e; tree tmp_var; tmp_var = create_tmp_var (TREE_TYPE (device)); if (offloaded) e = split_block_after_labels (new_bb); else { gsi = gsi_last_nondebug_bb (new_bb); gsi_prev (&gsi); e = split_block (new_bb, gsi_stmt (gsi)); } cond_bb = e->src; new_bb = e->dest; remove_edge (e); then_bb = create_empty_bb (cond_bb); else_bb = create_empty_bb (then_bb); set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb); set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb); stmt = gimple_build_cond_empty (cond); gsi = gsi_last_bb (cond_bb); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (then_bb); stmt = gimple_build_assign (tmp_var, device); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); gsi = gsi_start_bb (else_bb); stmt = gimple_build_assign (tmp_var, build_int_cst (integer_type_node, GOMP_DEVICE_HOST_FALLBACK)); gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING); make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE); make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE); add_bb_to_loop (then_bb, cond_bb->loop_father); add_bb_to_loop (else_bb, cond_bb->loop_father); make_edge (then_bb, new_bb, EDGE_FALLTHRU); make_edge (else_bb, new_bb, EDGE_FALLTHRU); device = tmp_var; gsi = gsi_last_nondebug_bb (new_bb); } else { gsi = gsi_last_nondebug_bb (new_bb); device = force_gimple_operand_gsi (&gsi, device, true, NULL_TREE, true, GSI_SAME_STMT); } t = gimple_omp_target_data_arg (entry_stmt); if (t == NULL) { t1 = size_zero_node; t2 = build_zero_cst (ptr_type_node); t3 = t2; t4 = t2; } else { t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1)))); t1 = size_binop (PLUS_EXPR, t1, size_int (1)); t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0)); t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1)); t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2)); } gimple *g; bool tagging = false; /* The maximum number used by any start_ix, without varargs. 
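This bounds the auto_vec reservation below; the OpenACC launch cases may still grow the vector with safe_push.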
*/ auto_vec<tree, 11> args; args.quick_push (device); if (offloaded) args.quick_push (build_fold_addr_expr (child_fn)); args.quick_push (t1); args.quick_push (t2); args.quick_push (t3); args.quick_push (t4); switch (start_ix) { case BUILT_IN_GOACC_DATA_START: case BUILT_IN_GOACC_DECLARE: case BUILT_IN_GOMP_TARGET_DATA: break; case BUILT_IN_GOMP_TARGET: case BUILT_IN_GOMP_TARGET_UPDATE: case BUILT_IN_GOMP_TARGET_ENTER_EXIT_DATA: args.quick_push (build_int_cst (unsigned_type_node, flags_i)); c = omp_find_clause (clauses, OMP_CLAUSE_DEPEND); if (c) depend = OMP_CLAUSE_DECL (c); else depend = build_int_cst (ptr_type_node, 0); args.quick_push (depend); if (start_ix == BUILT_IN_GOMP_TARGET) args.quick_push (get_target_arguments (&gsi, entry_stmt)); break; case BUILT_IN_GOACC_PARALLEL: oacc_set_fn_attrib (child_fn, clauses, &args); tagging = true; /* FALLTHRU */ case BUILT_IN_GOACC_ENTER_EXIT_DATA: case BUILT_IN_GOACC_UPDATE: { tree t_async = NULL_TREE; /* If present, use the value specified by the respective clause, making sure that is of the correct type. */ c = omp_find_clause (clauses, OMP_CLAUSE_ASYNC); if (c) t_async = fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_ASYNC_EXPR (c)); else if (!tagging) /* Default values for t_async. */ t_async = fold_convert_loc (gimple_location (entry_stmt), integer_type_node, build_int_cst (integer_type_node, GOMP_ASYNC_SYNC)); if (tagging && t_async) { unsigned HOST_WIDE_INT i_async = GOMP_LAUNCH_OP_MAX; if (TREE_CODE (t_async) == INTEGER_CST) { /* See if we can pack the async arg into the tag's operand. */ i_async = TREE_INT_CST_LOW (t_async); if (i_async < GOMP_LAUNCH_OP_MAX) t_async = NULL_TREE; else i_async = GOMP_LAUNCH_OP_MAX; } args.safe_push (oacc_launch_pack (GOMP_LAUNCH_ASYNC, NULL_TREE, i_async)); } if (t_async) args.safe_push (t_async); /* Save the argument index, and ... */ unsigned t_wait_idx = args.length (); unsigned num_waits = 0; c = omp_find_clause (clauses, OMP_CLAUSE_WAIT); if (!tagging || c) /* ... push a placeholder. */ args.safe_push (integer_zero_node); for (; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_WAIT) { args.safe_push (fold_convert_loc (OMP_CLAUSE_LOCATION (c), integer_type_node, OMP_CLAUSE_WAIT_EXPR (c))); num_waits++; } if (!tagging || num_waits) { tree len; /* Now that we know the number, update the placeholder. */ if (tagging) len = oacc_launch_pack (GOMP_LAUNCH_WAIT, NULL_TREE, num_waits); else len = build_int_cst (integer_type_node, num_waits); len = fold_convert_loc (gimple_location (entry_stmt), unsigned_type_node, len); args[t_wait_idx] = len; } } break; default: gcc_unreachable (); } if (tagging) /* Push terminal marker - zero. */ args.safe_push (oacc_launch_pack (0, NULL_TREE, 0)); g = gimple_build_call_vec (builtin_decl_explicit (start_ix), args); gimple_set_location (g, gimple_location (entry_stmt)); gsi_insert_before (&gsi, g, GSI_SAME_STMT); if (!offloaded) { g = gsi_stmt (gsi); gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET); gsi_remove (&gsi, true); } if (data_region && region->exit) { gsi = gsi_last_nondebug_bb (region->exit); g = gsi_stmt (gsi); gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN); gsi_remove (&gsi, true); } } /* Expand KFOR loop as an HSA gridified kernel, i.e. as a body only with iteration variable derived from the thread number. INTRA_GROUP means this is an expansion of a loop iterating over work-items within a separate iteration over groups.
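Instead of emitting a loop, each iteration variable is initialized directly from the corresponding HSA work-item or work-group id builtin as v = n1 + threadid * step.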
*/ static void grid_expand_omp_for_loop (struct omp_region *kfor, bool intra_group) { gimple_stmt_iterator gsi; gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry)); gcc_checking_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP); size_t collapse = gimple_omp_for_collapse (for_stmt); struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, gimple_omp_for_collapse (for_stmt)); struct omp_for_data fd; remove_edge (BRANCH_EDGE (kfor->entry)); basic_block body_bb = FALLTHRU_EDGE (kfor->entry)->dest; gcc_assert (kfor->cont); omp_extract_for_data (for_stmt, &fd, loops); gsi = gsi_start_bb (body_bb); for (size_t dim = 0; dim < collapse; dim++) { tree type, itype; itype = type = TREE_TYPE (fd.loops[dim].v); if (POINTER_TYPE_P (type)) itype = signed_type_for (type); tree n1 = fd.loops[dim].n1; tree step = fd.loops[dim].step; n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1), true, NULL_TREE, true, GSI_SAME_STMT); step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step), true, NULL_TREE, true, GSI_SAME_STMT); tree threadid; if (gimple_omp_for_grid_group_iter (for_stmt)) { gcc_checking_assert (!intra_group); threadid = build_call_expr (builtin_decl_explicit (BUILT_IN_HSA_WORKGROUPID), 1, build_int_cstu (unsigned_type_node, dim)); } else if (intra_group) threadid = build_call_expr (builtin_decl_explicit (BUILT_IN_HSA_WORKITEMID), 1, build_int_cstu (unsigned_type_node, dim)); else threadid = build_call_expr (builtin_decl_explicit (BUILT_IN_HSA_WORKITEMABSID), 1, build_int_cstu (unsigned_type_node, dim)); threadid = fold_convert (itype, threadid); threadid = force_gimple_operand_gsi (&gsi, threadid, true, NULL_TREE, true, GSI_SAME_STMT); tree startvar = fd.loops[dim].v; tree t = fold_build2 (MULT_EXPR, itype, threadid, step); if (POINTER_TYPE_P (type)) t = fold_build_pointer_plus (n1, t); else t = fold_build2 (PLUS_EXPR, type, t, n1); t = fold_convert (type, t); t = force_gimple_operand_gsi (&gsi, t, DECL_P (startvar) && TREE_ADDRESSABLE (startvar), NULL_TREE, true, GSI_SAME_STMT); gassign *assign_stmt = gimple_build_assign (startvar, t); gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT); } /* Remove the omp for statement. */ gsi = gsi_last_nondebug_bb (kfor->entry); gsi_remove (&gsi, true); /* Remove the GIMPLE_OMP_CONTINUE statement. */ gsi = gsi_last_nondebug_bb (kfor->cont); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_CONTINUE); gsi_remove (&gsi, true); /* Replace the GIMPLE_OMP_RETURN with a barrier, if necessary. */ gsi = gsi_last_nondebug_bb (kfor->exit); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); if (intra_group) gsi_insert_before (&gsi, omp_build_barrier (NULL_TREE), GSI_SAME_STMT); gsi_remove (&gsi, true); /* Fixup the much simpler CFG. */ remove_edge (find_edge (kfor->cont, body_bb)); if (kfor->cont != body_bb) set_immediate_dominator (CDI_DOMINATORS, kfor->cont, body_bb); set_immediate_dominator (CDI_DOMINATORS, kfor->exit, kfor->cont); } /* Structure passed to grid_remap_kernel_arg_accesses so that it can remap argument_decls. */ struct grid_arg_decl_map { tree old_arg; tree new_arg; }; /* Invoked through walk_gimple_op, will remap all PARM_DECLs to the ones pertaining to kernel function. 
*/ static tree grid_remap_kernel_arg_accesses (tree *tp, int *walk_subtrees, void *data) { struct walk_stmt_info *wi = (struct walk_stmt_info *) data; struct grid_arg_decl_map *adm = (struct grid_arg_decl_map *) wi->info; tree t = *tp; if (t == adm->old_arg) *tp = adm->new_arg; *walk_subtrees = !TYPE_P (t) && !DECL_P (t); return NULL_TREE; } /* If TARGET region contains a kernel body for loop, remove its region from the TARGET and expand it in HSA gridified kernel fashion. */ static void grid_expand_target_grid_body (struct omp_region *target) { if (!hsa_gen_requested_p ()) return; gomp_target *tgt_stmt = as_a <gomp_target *> (last_stmt (target->entry)); struct omp_region **pp; for (pp = &target->inner; *pp; pp = &(*pp)->next) if ((*pp)->type == GIMPLE_OMP_GRID_BODY) break; struct omp_region *gpukernel = *pp; tree orig_child_fndecl = gimple_omp_target_child_fn (tgt_stmt); if (!gpukernel) { /* HSA cannot handle OACC stuff. */ if (gimple_omp_target_kind (tgt_stmt) != GF_OMP_TARGET_KIND_REGION) return; gcc_checking_assert (orig_child_fndecl); gcc_assert (!omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)); cgraph_node *n = cgraph_node::get (orig_child_fndecl); hsa_register_kernel (n); return; } gcc_assert (omp_find_clause (gimple_omp_target_clauses (tgt_stmt), OMP_CLAUSE__GRIDDIM_)); tree inside_block = gimple_block (first_stmt (single_succ (gpukernel->entry))); *pp = gpukernel->next; for (pp = &gpukernel->inner; *pp; pp = &(*pp)->next) if ((*pp)->type == GIMPLE_OMP_FOR) break; struct omp_region *kfor = *pp; gcc_assert (kfor); gomp_for *for_stmt = as_a <gomp_for *> (last_stmt (kfor->entry)); gcc_assert (gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP); *pp = kfor->next; if (kfor->inner) { if (gimple_omp_for_grid_group_iter (for_stmt)) { struct omp_region **next_pp; for (pp = &kfor->inner; *pp; pp = next_pp) { next_pp = &(*pp)->next; if ((*pp)->type != GIMPLE_OMP_FOR) continue; gomp_for *inner = as_a <gomp_for *> (last_stmt ((*pp)->entry)); gcc_assert (gimple_omp_for_kind (inner) == GF_OMP_FOR_KIND_GRID_LOOP); grid_expand_omp_for_loop (*pp, true); *pp = (*pp)->next; next_pp = pp; } } expand_omp (kfor->inner); } if (gpukernel->inner) expand_omp (gpukernel->inner); tree kern_fndecl = copy_node (orig_child_fndecl); DECL_NAME (kern_fndecl) = clone_function_name (kern_fndecl, "kernel"); SET_DECL_ASSEMBLER_NAME (kern_fndecl, DECL_NAME (kern_fndecl)); tree tgtblock = gimple_block (tgt_stmt); tree fniniblock = make_node (BLOCK); BLOCK_ABSTRACT_ORIGIN (fniniblock) = tgtblock; BLOCK_SOURCE_LOCATION (fniniblock) = BLOCK_SOURCE_LOCATION (tgtblock); BLOCK_SOURCE_END_LOCATION (fniniblock) = BLOCK_SOURCE_END_LOCATION (tgtblock); BLOCK_SUPERCONTEXT (fniniblock) = kern_fndecl; DECL_INITIAL (kern_fndecl) = fniniblock; push_struct_function (kern_fndecl); cfun->function_end_locus = gimple_location (tgt_stmt); init_tree_ssa (cfun); pop_cfun (); /* Make sure to generate early debug for the function before outlining anything. */ if (! 
gimple_in_ssa_p (cfun)) (*debug_hooks->early_global_decl) (cfun->decl); tree old_parm_decl = DECL_ARGUMENTS (kern_fndecl); gcc_assert (!DECL_CHAIN (old_parm_decl)); tree new_parm_decl = copy_node (DECL_ARGUMENTS (kern_fndecl)); DECL_CONTEXT (new_parm_decl) = kern_fndecl; DECL_ARGUMENTS (kern_fndecl) = new_parm_decl; gcc_assert (VOID_TYPE_P (TREE_TYPE (DECL_RESULT (kern_fndecl)))); DECL_RESULT (kern_fndecl) = copy_node (DECL_RESULT (kern_fndecl)); DECL_CONTEXT (DECL_RESULT (kern_fndecl)) = kern_fndecl; struct function *kern_cfun = DECL_STRUCT_FUNCTION (kern_fndecl); kern_cfun->curr_properties = cfun->curr_properties; grid_expand_omp_for_loop (kfor, false); /* Remove the omp for statement. */ gimple_stmt_iterator gsi = gsi_last_nondebug_bb (gpukernel->entry); gsi_remove (&gsi, true); /* Replace the GIMPLE_OMP_RETURN at the end of the kernel region with a real return. */ gsi = gsi_last_nondebug_bb (gpukernel->exit); gcc_assert (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN); gimple *ret_stmt = gimple_build_return (NULL); gsi_insert_after (&gsi, ret_stmt, GSI_SAME_STMT); gsi_remove (&gsi, true); /* Statements in the first BB in the target construct have been produced by target lowering and must be copied inside the GPUKERNEL, with the two exceptions of the first OMP statement and the OMP_DATA assignment statement. */ gsi = gsi_start_bb (single_succ (gpukernel->entry)); tree data_arg = gimple_omp_target_data_arg (tgt_stmt); tree sender = data_arg ? TREE_VEC_ELT (data_arg, 0) : NULL; for (gimple_stmt_iterator tsi = gsi_start_bb (single_succ (target->entry)); !gsi_end_p (tsi); gsi_next (&tsi)) { gimple *stmt = gsi_stmt (tsi); if (is_gimple_omp (stmt)) break; if (sender && is_gimple_assign (stmt) && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR && TREE_OPERAND (gimple_assign_rhs1 (stmt), 0) == sender) continue; gimple *copy = gimple_copy (stmt); gsi_insert_before (&gsi, copy, GSI_SAME_STMT); gimple_set_block (copy, fniniblock); } move_sese_region_to_fn (kern_cfun, single_succ (gpukernel->entry), gpukernel->exit, inside_block); cgraph_node *kcn = cgraph_node::get_create (kern_fndecl); kcn->mark_force_output (); cgraph_node *orig_child = cgraph_node::get (orig_child_fndecl); hsa_register_kernel (kcn, orig_child); cgraph_node::add_new_function (kern_fndecl, true); push_cfun (kern_cfun); cgraph_edge::rebuild_edges (); /* Re-map any mention of the PARM_DECL of the original function to the PARM_DECL of the new one. TODO: It would be great if lowering produced references into the GPU kernel decl straight away and we did not have to do this. */ struct grid_arg_decl_map adm; adm.old_arg = old_parm_decl; adm.new_arg = new_parm_decl; basic_block bb; FOR_EACH_BB_FN (bb, kern_cfun) { for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) { gimple *stmt = gsi_stmt (gsi); struct walk_stmt_info wi; memset (&wi, 0, sizeof (wi)); wi.info = &adm; walk_gimple_op (stmt, grid_remap_kernel_arg_accesses, &wi); } } pop_cfun (); return; } /* Expand the parallel region tree rooted at REGION. Expansion proceeds in depth-first order. Innermost regions are expanded first. This way, parallel regions that require a new function to be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any internal dependencies in their body. */ static void expand_omp (struct omp_region *region) { omp_any_child_fn_dumped = false; while (region) { location_t saved_location; gimple *inner_stmt = NULL; /* First, determine whether this is a combined parallel+workshare region. 
*/ if (region->type == GIMPLE_OMP_PARALLEL) determine_parallel_type (region); else if (region->type == GIMPLE_OMP_TARGET) grid_expand_target_grid_body (region); if (region->type == GIMPLE_OMP_FOR && gimple_omp_for_combined_p (last_stmt (region->entry))) inner_stmt = last_stmt (region->inner->entry); if (region->inner) expand_omp (region->inner); saved_location = input_location; if (gimple_has_location (last_stmt (region->entry))) input_location = gimple_location (last_stmt (region->entry)); switch (region->type) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: expand_omp_taskreg (region); break; case GIMPLE_OMP_FOR: expand_omp_for (region, inner_stmt); break; case GIMPLE_OMP_SECTIONS: expand_omp_sections (region); break; case GIMPLE_OMP_SECTION: /* Individual omp sections are handled together with their parent GIMPLE_OMP_SECTIONS region. */ break; case GIMPLE_OMP_SINGLE: expand_omp_single (region); break; case GIMPLE_OMP_ORDERED: { gomp_ordered *ord_stmt = as_a <gomp_ordered *> (last_stmt (region->entry)); if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt), OMP_CLAUSE_DEPEND)) { /* We'll expand these when expanding corresponding worksharing region with ordered(n) clause. */ gcc_assert (region->outer && region->outer->type == GIMPLE_OMP_FOR); region->ord_stmt = ord_stmt; break; } } /* FALLTHRU */ case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_TEAMS: expand_omp_synch (region); break; case GIMPLE_OMP_ATOMIC_LOAD: expand_omp_atomic (region); break; case GIMPLE_OMP_TARGET: expand_omp_target (region); break; default: gcc_unreachable (); } input_location = saved_location; region = region->next; } if (omp_any_child_fn_dumped) { if (dump_file) dump_function_header (dump_file, current_function_decl, dump_flags); omp_any_child_fn_dumped = false; } } /* Helper for build_omp_regions. Scan the dominator tree starting at block BB. PARENT is the region that contains BB. If SINGLE_TREE is true, the function ends once a single tree is built (otherwise, whole forest of OMP constructs may be built). */ static void build_omp_regions_1 (basic_block bb, struct omp_region *parent, bool single_tree) { gimple_stmt_iterator gsi; gimple *stmt; basic_block son; gsi = gsi_last_nondebug_bb (bb); if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi))) { struct omp_region *region; enum gimple_code code; stmt = gsi_stmt (gsi); code = gimple_code (stmt); if (code == GIMPLE_OMP_RETURN) { /* STMT is the return point out of region PARENT. Mark it as the exit point and make PARENT the immediately enclosing region. */ gcc_assert (parent); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_ATOMIC_STORE) { /* GIMPLE_OMP_ATOMIC_STORE is analogous to GIMPLE_OMP_RETURN, but matches with GIMPLE_OMP_ATOMIC_LOAD. */ gcc_assert (parent); gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD); region = parent; region->exit = bb; parent = parent->outer; } else if (code == GIMPLE_OMP_CONTINUE) { gcc_assert (parent); parent->cont = bb; } else if (code == GIMPLE_OMP_SECTIONS_SWITCH) { /* GIMPLE_OMP_SECTIONS_SWITCH is part of GIMPLE_OMP_SECTIONS, and we do nothing for it. */ } else { region = new_omp_region (bb, code, parent); /* Otherwise... 
*/ if (code == GIMPLE_OMP_TARGET) { switch (gimple_omp_target_kind (stmt)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: break; case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: /* ..., other than for those stand-alone directives... */ region = NULL; break; default: gcc_unreachable (); } } else if (code == GIMPLE_OMP_ORDERED && omp_find_clause (gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt)), OMP_CLAUSE_DEPEND)) /* #pragma omp ordered depend is also just a stand-alone directive. */ region = NULL; /* ..., this directive becomes the parent for a new region. */ if (region) parent = region; } } if (single_tree && !parent) return; for (son = first_dom_son (CDI_DOMINATORS, bb); son; son = next_dom_son (CDI_DOMINATORS, son)) build_omp_regions_1 (son, parent, single_tree); } /* Builds the tree of OMP regions rooted at ROOT, storing it to root_omp_region. */ static void build_omp_regions_root (basic_block root) { gcc_assert (root_omp_region == NULL); build_omp_regions_1 (root, NULL, true); gcc_assert (root_omp_region != NULL); } /* Expands omp construct (and its subconstructs) starting in HEAD. */ void omp_expand_local (basic_block head) { build_omp_regions_root (head); if (dump_file && (dump_flags & TDF_DETAILS)) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); omp_free_regions (); } /* Scan the CFG and build a tree of OMP regions. Return the root of the OMP region tree. */ static void build_omp_regions (void) { gcc_assert (root_omp_region == NULL); calculate_dominance_info (CDI_DOMINATORS); build_omp_regions_1 (ENTRY_BLOCK_PTR_FOR_FN (cfun), NULL, false); } /* Main entry point for expanding OMP-GIMPLE into runtime calls. */ static unsigned int execute_expand_omp (void) { build_omp_regions (); if (!root_omp_region) return 0; if (dump_file) { fprintf (dump_file, "\nOMP region tree\n\n"); dump_omp_region (dump_file, root_omp_region, 0); fprintf (dump_file, "\n"); } remove_exit_barriers (root_omp_region); expand_omp (root_omp_region); if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP)) verify_loop_structure (); cleanup_tree_cfg (); omp_free_regions (); return 0; } /* OMP expansion -- the default pass, run before creation of SSA form. */ namespace { const pass_data pass_data_expand_omp = { GIMPLE_PASS, /* type */ "ompexp", /* name */ OPTGROUP_OMP, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_gimple_any, /* properties_required */ PROP_gimple_eomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ 0, /* todo_flags_finish */ }; class pass_expand_omp : public gimple_opt_pass { public: pass_expand_omp (gcc::context *ctxt) : gimple_opt_pass (pass_data_expand_omp, ctxt) {} /* opt_pass methods: */ virtual unsigned int execute (function *) { bool gate = ((flag_openacc != 0 || flag_openmp != 0 || flag_openmp_simd != 0) && !seen_error ()); /* This pass always runs, to provide PROP_gimple_eomp. But often, there is nothing to do. 
*/ if (!gate) return 0; return execute_expand_omp (); } }; // class pass_expand_omp } // anon namespace gimple_opt_pass * make_pass_expand_omp (gcc::context *ctxt) { return new pass_expand_omp (ctxt); } namespace { const pass_data pass_data_expand_omp_ssa = { GIMPLE_PASS, /* type */ "ompexpssa", /* name */ OPTGROUP_OMP, /* optinfo_flags */ TV_NONE, /* tv_id */ PROP_cfg | PROP_ssa, /* properties_required */ PROP_gimple_eomp, /* properties_provided */ 0, /* properties_destroyed */ 0, /* todo_flags_start */ TODO_cleanup_cfg | TODO_rebuild_alias, /* todo_flags_finish */ }; class pass_expand_omp_ssa : public gimple_opt_pass { public: pass_expand_omp_ssa (gcc::context *ctxt) : gimple_opt_pass (pass_data_expand_omp_ssa, ctxt) {} /* opt_pass methods: */ virtual bool gate (function *fun) { return !(fun->curr_properties & PROP_gimple_eomp); } virtual unsigned int execute (function *) { return execute_expand_omp (); } opt_pass * clone () { return new pass_expand_omp_ssa (m_ctxt); } }; // class pass_expand_omp_ssa } // anon namespace gimple_opt_pass * make_pass_expand_omp_ssa (gcc::context *ctxt) { return new pass_expand_omp_ssa (ctxt); } /* Called from tree-cfg.c::make_edges to create cfg edges for all relevant GIMPLE_* codes. */ bool omp_make_gimple_edges (basic_block bb, struct omp_region **region, int *region_idx) { gimple *last = last_stmt (bb); enum gimple_code code = gimple_code (last); struct omp_region *cur_region = *region; bool fallthru = false; switch (code) { case GIMPLE_OMP_PARALLEL: case GIMPLE_OMP_TASK: case GIMPLE_OMP_FOR: case GIMPLE_OMP_SINGLE: case GIMPLE_OMP_TEAMS: case GIMPLE_OMP_MASTER: case GIMPLE_OMP_TASKGROUP: case GIMPLE_OMP_CRITICAL: case GIMPLE_OMP_SECTION: case GIMPLE_OMP_GRID_BODY: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; break; case GIMPLE_OMP_ORDERED: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; if (omp_find_clause (gimple_omp_ordered_clauses (as_a <gomp_ordered *> (last)), OMP_CLAUSE_DEPEND)) cur_region = cur_region->outer; break; case GIMPLE_OMP_TARGET: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; switch (gimple_omp_target_kind (last)) { case GF_OMP_TARGET_KIND_REGION: case GF_OMP_TARGET_KIND_DATA: case GF_OMP_TARGET_KIND_OACC_PARALLEL: case GF_OMP_TARGET_KIND_OACC_KERNELS: case GF_OMP_TARGET_KIND_OACC_DATA: case GF_OMP_TARGET_KIND_OACC_HOST_DATA: break; case GF_OMP_TARGET_KIND_UPDATE: case GF_OMP_TARGET_KIND_ENTER_DATA: case GF_OMP_TARGET_KIND_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_UPDATE: case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA: case GF_OMP_TARGET_KIND_OACC_DECLARE: cur_region = cur_region->outer; break; default: gcc_unreachable (); } break; case GIMPLE_OMP_SECTIONS: cur_region = new_omp_region (bb, code, cur_region); fallthru = true; break; case GIMPLE_OMP_SECTIONS_SWITCH: fallthru = false; break; case GIMPLE_OMP_ATOMIC_LOAD: case GIMPLE_OMP_ATOMIC_STORE: fallthru = true; break; case GIMPLE_OMP_RETURN: /* In the case of a GIMPLE_OMP_SECTION, the edge will go somewhere other than the next block. This will be created later. */ cur_region->exit = bb; if (cur_region->type == GIMPLE_OMP_TASK) /* Add an edge corresponding to not scheduling the task immediately. 
*/ make_edge (cur_region->entry, bb, EDGE_ABNORMAL); fallthru = cur_region->type != GIMPLE_OMP_SECTION; cur_region = cur_region->outer; break; case GIMPLE_OMP_CONTINUE: cur_region->cont = bb; switch (cur_region->type) { case GIMPLE_OMP_FOR: /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE succs edges as abnormal to prevent splitting them. */ single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL; /* Make the loopback edge. */ make_edge (bb, single_succ (cur_region->entry), EDGE_ABNORMAL); /* Create an edge from GIMPLE_OMP_FOR to exit, which corresponds to the case that the body of the loop is not executed at all. */ make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL); make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL); fallthru = false; break; case GIMPLE_OMP_SECTIONS: /* Wire up the edges into and out of the nested sections. */ { basic_block switch_bb = single_succ (cur_region->entry); struct omp_region *i; for (i = cur_region->inner; i ; i = i->next) { gcc_assert (i->type == GIMPLE_OMP_SECTION); make_edge (switch_bb, i->entry, 0); make_edge (i->exit, bb, EDGE_FALLTHRU); } /* Make the loopback edge to the block with GIMPLE_OMP_SECTIONS_SWITCH. */ make_edge (bb, switch_bb, 0); /* Make the edge from the switch to exit. */ make_edge (switch_bb, bb->next_bb, 0); fallthru = false; } break; case GIMPLE_OMP_TASK: fallthru = true; break; default: gcc_unreachable (); } break; default: gcc_unreachable (); } if (*region != cur_region) { *region = cur_region; if (cur_region) *region_idx = cur_region->entry->index; else *region_idx = 0; } return fallthru; } #include "gt-omp-expand.h"
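The expand_omp driver above is a post-order walk over the region tree: a region's children (region->inner) are expanded before the region itself, and siblings then follow via region->next, which is what lets constructs that outline a new function (e.g. GIMPLE_OMP_PARALLEL) be expanded with their bodies already self-contained. A minimal sketch of just that traversal shape, using a hypothetical pared-down region struct rather than GCC's:

#include <cstdio>

struct region { region *inner; region *next; const char *name; };

/* Post-order over the sibling-linked tree, mirroring expand_omp's loop:
   recurse into inner first, handle the region, then move to the sibling.  */
static void
expand (region *r)
{
  while (r)
    {
      if (r->inner)
        expand (r->inner);
      std::printf ("expanding %s\n", r->name); /* stands in for the switch */
      r = r->next;
    }
}

int
main ()
{
  region leaf = { nullptr, nullptr, "inner for" };
  region root = { &leaf, nullptr, "parallel" };
  expand (&root); /* prints "inner for" before "parallel" */
  return 0;
}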
atomic-23.c
/* PR middle-end/88968 */ /* { dg-do compile } */ struct __attribute__((packed)) S { unsigned int a : 16; unsigned int b : 1; } s; void f1 (void) { #pragma omp atomic ++s.a; } int f2 (void) { int r; #pragma omp atomic capture { r = s.a; s.a = 0; } return r; } int f3 (void) { int r; #pragma omp atomic capture { r = s.a; s.a = s.a + 32; } return r; } int f4 (void) { int r; #pragma omp atomic capture r = s.a = s.a + 32; return r; }
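Each capture form in the test above pairs one read of the bit-field with one update that must appear as a single atomic step. A hedged sketch of the serialized meaning of f3 (a hypothetical lock stands in for whatever the compiler actually emits for the packed field):

#include <mutex>

static std::mutex m;                   /* hypothetical lock, exposition only */
static struct { unsigned a : 16; } t;  /* stand-in for the packed struct */

int f3_semantics (void)
{
  std::lock_guard<std::mutex> g (m);
  int r = t.a;     /* capture the old value */
  t.a = t.a + 32;  /* then update; wraps modulo 2^16 in the 16-bit field */
  return r;
}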
convolutiondepthwise_3x3_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convdw3x3s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m256 _bias0 = bias ? _mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f); const unsigned short* k0 = (const unsigned short*)kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = loadfp16(k0); __m256 _k01 = loadfp16(k0 + 8); __m256 _k02 = loadfp16(k0 + 16); __m256 _k10 = loadfp16(k0 + 24); __m256 _k11 = loadfp16(k0 + 32); __m256 _k12 = loadfp16(k0 + 40); __m256 _k20 = loadfp16(k0 + 48); __m256 _k21 = loadfp16(k0 + 56); __m256 _k22 = loadfp16(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, 
_sum2); _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3); __m256 _sum4 = _bias0; __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r16 = _mm256_loadu_ps(r1 + 48); __m256 _r26 = _mm256_loadu_ps(r2 + 48); _mm256_storeu_ps(outptr0 + 24, _sum3); _sum4 = _mm256_comp_fmadd_ps(_k00, _r04, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k01, _r05, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k02, _r06, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k10, _r14, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k11, _r15, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k12, _r16, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k20, _r24, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k21, _r25, _sum4); _sum4 = _mm256_comp_fmadd_ps(_k22, _r26, _sum4); __m256 _sum5 = _bias0; __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _r17 = _mm256_loadu_ps(r1 + 56); __m256 _r27 = _mm256_loadu_ps(r2 + 56); _mm256_storeu_ps(outptr0 + 32, _sum4); _sum5 = _mm256_comp_fmadd_ps(_k00, _r05, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k01, _r06, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k02, _r07, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k10, _r15, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k11, _r16, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k12, _r17, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k20, _r25, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k21, _r26, _sum5); _sum5 = _mm256_comp_fmadd_ps(_k22, _r27, _sum5); __m256 _sum6 = _bias0; __m256 _r08 = _mm256_loadu_ps(r0 + 64); __m256 _r18 = _mm256_loadu_ps(r1 + 64); __m256 _r28 = _mm256_loadu_ps(r2 + 64); _mm256_storeu_ps(outptr0 + 40, _sum5); _sum6 = _mm256_comp_fmadd_ps(_k00, _r06, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k01, _r07, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k02, _r08, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k10, _r16, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k11, _r17, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k12, _r18, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k20, _r26, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k21, _r27, _sum6); _sum6 = _mm256_comp_fmadd_ps(_k22, _r28, _sum6); __m256 _sum7 = _bias0; __m256 _r09 = _mm256_loadu_ps(r0 + 72); __m256 _r19 = _mm256_loadu_ps(r1 + 72); __m256 _r29 = _mm256_loadu_ps(r2 + 72); _mm256_storeu_ps(outptr0 + 48, _sum6); _sum7 = _mm256_comp_fmadd_ps(_k00, _r07, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k01, _r08, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k02, _r09, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k10, _r17, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k11, _r18, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k12, _r19, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k20, _r27, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k21, _r28, _sum7); _sum7 = _mm256_comp_fmadd_ps(_k22, _r29, _sum7); _mm256_storeu_ps(outptr0 + 56, _sum7); r0 += 64; r1 += 64; r2 += 64; outptr0 += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 
= _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1); __m256 _sum2 = _bias0; __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k00, _r02, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k01, _r03, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k02, _r04, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k10, _r12, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k11, _r13, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k12, _r14, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k20, _r22, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k21, _r23, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k22, _r24, _sum2); __m256 _sum3 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k00, _r03, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k01, _r04, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k02, _r05, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k10, _r13, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k11, _r14, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k12, _r15, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k20, _r23, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k21, _r24, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k22, _r25, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 32; r1 += 32; r2 += 32; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 
_r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r21, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r23, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 16; r1 += 16; r2 += 16; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 8; r1 += 8; r2 += 8; outptr0 += 8; } r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; } } } static void convdw3x3s2_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int outw = top_blob.w; int outh = top_blob.h; const int group = bottom_blob.c; const int tailstep = (w - 2 * outw + w) * 8; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int g = 0; g < group; g++) { Mat out = top_blob.channel(g); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + g * 8) : _mm256_set1_ps(0.f); const unsigned short* k0 = (const unsigned short*)kernel.row(g); float* outptr0 = out.row(0); const Mat img0 = bottom_blob.channel(g); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); __m256 _k00 = loadfp16(k0); __m256 _k01 = loadfp16(k0 + 8); __m256 _k02 = loadfp16(k0 + 16); __m256 _k10 = loadfp16(k0 + 24); __m256 _k11 = loadfp16(k0 + 32); __m256 _k12 = loadfp16(k0 + 40); __m256 _k20 = loadfp16(k0 + 48); __m256 _k21 = loadfp16(k0 + 56); __m256 _k22 = loadfp16(k0 + 64); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1); __m256 _sum2 = _bias0; __m256 _r05 = _mm256_loadu_ps(r0 + 40); __m256 _r15 = _mm256_loadu_ps(r1 + 40); __m256 _r25 = _mm256_loadu_ps(r2 + 40); __m256 _r06 = _mm256_loadu_ps(r0 + 48); __m256 _r16 = _mm256_loadu_ps(r1 + 48); __m256 _r26 = _mm256_loadu_ps(r2 + 48); _mm256_storeu_ps(outptr0 + 8, _sum1); _sum2 = _mm256_comp_fmadd_ps(_k00, _r04, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k01, _r05, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k02, _r06, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k10, _r14, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k11, _r15, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k12, _r16, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k20, _r24, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k21, _r25, _sum2); _sum2 = _mm256_comp_fmadd_ps(_k22, _r26, _sum2); __m256 _sum3 = _bias0; __m256 _r07 = _mm256_loadu_ps(r0 + 56); __m256 _r17 = _mm256_loadu_ps(r1 + 56); __m256 _r27 = _mm256_loadu_ps(r2 + 56); __m256 _r08 = _mm256_loadu_ps(r0 + 64); __m256 _r18 = _mm256_loadu_ps(r1 + 64); __m256 _r28 = _mm256_loadu_ps(r2 + 64); _mm256_storeu_ps(outptr0 + 16, _sum2); _sum3 = _mm256_comp_fmadd_ps(_k00, _r06, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k01, _r07, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k02, _r08, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k10, _r16, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k11, _r17, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k12, _r18, _sum3); _sum3 = 
_mm256_comp_fmadd_ps(_k20, _r26, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k21, _r27, _sum3); _sum3 = _mm256_comp_fmadd_ps(_k22, _r28, _sum3); _mm256_storeu_ps(outptr0 + 24, _sum3); r0 += 2 * 32; r1 += 2 * 32; r2 += 2 * 32; outptr0 += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); __m256 _sum1 = _bias0; __m256 _r03 = _mm256_loadu_ps(r0 + 24); __m256 _r13 = _mm256_loadu_ps(r1 + 24); __m256 _r23 = _mm256_loadu_ps(r2 + 24); __m256 _r04 = _mm256_loadu_ps(r0 + 32); __m256 _r14 = _mm256_loadu_ps(r1 + 32); __m256 _r24 = _mm256_loadu_ps(r2 + 32); _mm256_storeu_ps(outptr0, _sum0); _sum1 = _mm256_comp_fmadd_ps(_k00, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k10, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k20, _r22, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k21, _r23, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k22, _r24, _sum1); _mm256_storeu_ps(outptr0 + 8, _sum1); r0 += 2 * 16; r1 += 2 * 16; r2 += 2 * 16; outptr0 += 16; } for (; j < outw; j++) { __m256 _sum0 = _bias0; __m256 _r00 = _mm256_loadu_ps(r0); __m256 _r01 = _mm256_loadu_ps(r0 + 8); __m256 _r02 = _mm256_loadu_ps(r0 + 16); __m256 _r10 = _mm256_loadu_ps(r1); __m256 _r11 = _mm256_loadu_ps(r1 + 8); __m256 _r12 = _mm256_loadu_ps(r1 + 16); __m256 _r20 = _mm256_loadu_ps(r2); __m256 _r21 = _mm256_loadu_ps(r2 + 8); __m256 _r22 = _mm256_loadu_ps(r2 + 16); _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k20, _r20, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k21, _r21, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k22, _r22, _sum0); _mm256_storeu_ps(outptr0, _sum0); r0 += 2 * 8; r1 += 2 * 8; r2 += 2 * 8; outptr0 += 8; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } }
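One detail of the stride-2 kernel above worth spelling out is tailstep = (w - 2 * outw + w) * 8: after producing outw outputs, the row pointers have advanced by 2 * outw * 8 floats, while the next output row starts reading two input rows further down, i.e. 2 * w * 8 floats past the previous row start; tailstep is exactly the difference. A small self-check of that arithmetic with made-up sizes:

#include <cassert>

int main()
{
    const int w = 10, outw = 4, pack = 8;      // hypothetical sizes
    const int consumed = 2 * outw * pack;      // advance while producing one row
    const int next_row = 2 * w * pack;         // offset of the next input row pair
    const int tailstep = (w - 2 * outw + w) * pack;
    assert(next_row - consumed == tailstep);   // matches the kernel's formula
    return 0;
}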
OnDiscMSExperiment.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2017. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #ifndef OPENMS_KERNEL_ONDISCMSEXPERIMENT_H #define OPENMS_KERNEL_ONDISCMSEXPERIMENT_H #include <OpenMS/INTERFACES/DataStructures.h> #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/KERNEL/MSSpectrum.h> #include <OpenMS/KERNEL/MSChromatogram.h> #include <OpenMS/METADATA/ExperimentalSettings.h> #include <OpenMS/FORMAT/IndexedMzMLFile.h> #include <OpenMS/FORMAT/MzMLFile.h> #include <vector> #include <algorithm> #include <limits> #include <boost/shared_ptr.hpp> namespace OpenMS { /** @brief Representation of a mass spectrometry experiment on disk. @ingroup Kernel @note This implementation is @a not thread-safe since it keeps internally a single file access pointer which it moves when accessing a specific data item. Please provide a separate copy to each thread, e.g. @code #pragma omp parallel for firstprivate(ondisc_map) @endcode */ class OnDiscMSExperiment { typedef ChromatogramPeak ChromatogramPeakT; typedef Peak1D PeakT; public: /** @brief Constructor This initializes the object, use openFile to open a file. */ OnDiscMSExperiment() {} /** @brief Open a specific file on disk. This tries to read the indexed mzML by parsing the index and then reading the meta information into memory. 
@return Whether the parsing of the file was successful (if false, the file most likely was not an indexed mzML file) */ bool openFile(const String& filename, bool skipMetaData = false) { filename_ = filename; indexed_mzml_file_.openFile(filename); if (filename != "" && !skipMetaData) { loadMetaData_(filename); } return indexed_mzml_file_.getParsingSuccess(); } /// Copy constructor OnDiscMSExperiment(const OnDiscMSExperiment& source) : filename_(source.filename_), indexed_mzml_file_(source.indexed_mzml_file_), meta_ms_experiment_(source.meta_ms_experiment_) { } /** @brief Equality operator This only checks whether the underlying file is the same and the parsed meta-information is the same. Note that the file reader (e.g. the std::ifstream of the file) might be in a different state. */ bool operator==(const OnDiscMSExperiment& rhs) const { // check if file and meta information is the same return filename_ == rhs.filename_ && (*meta_ms_experiment_) == (*rhs.meta_ms_experiment_); // do not check if indexed_mzml_file_ is equal -> they have the same filename... } /// Inequality operator bool operator!=(const OnDiscMSExperiment& rhs) const { return !(operator==(rhs)); } /** @brief Checks if all spectra are sorted with respect to ascending RT Note that we cannot check whether all spectra are sorted (except if we were to load them all and check). */ bool isSortedByRT() const { return meta_ms_experiment_->isSorted(false); } /// alias for getNrSpectra inline Size size() const { return getNrSpectra(); } /// returns whether spectra are empty inline bool empty() const { return indexed_mzml_file_.getNrSpectra() == 0; } /// get the total number of spectra available inline Size getNrSpectra() const { return indexed_mzml_file_.getNrSpectra(); } /// get the total number of chromatograms available inline Size getNrChromatograms() const { return indexed_mzml_file_.getNrChromatograms(); } /// returns the meta information of this experiment (const access) boost::shared_ptr<const ExperimentalSettings> getExperimentalSettings() const { return boost::static_pointer_cast<const ExperimentalSettings>(meta_ms_experiment_); } /// alias for getSpectrum inline MSSpectrum operator[](Size n) { return getSpectrum(n); } /** @brief returns a single spectrum TODO: make this more efficient by reducing the copying */ MSSpectrum getSpectrum(Size id) { OpenMS::Interfaces::SpectrumPtr sptr = indexed_mzml_file_.getSpectrumById(static_cast<int>(id)); MSSpectrum spectrum(meta_ms_experiment_->operator[](id)); // recreate a spectrum from the data arrays! OpenMS::Interfaces::BinaryDataArrayPtr mz_arr = sptr->getMZArray(); OpenMS::Interfaces::BinaryDataArrayPtr int_arr = sptr->getIntensityArray(); spectrum.reserve(mz_arr->data.size()); for (Size i = 0; i < mz_arr->data.size(); i++) { PeakT p; p.setMZ(mz_arr->data[i]); p.setIntensity(int_arr->data[i]); spectrum.push_back(p); } return spectrum; } /** @brief returns a single spectrum */ OpenMS::Interfaces::SpectrumPtr getSpectrumById(Size id) { return indexed_mzml_file_.getSpectrumById(id); } /** @brief returns a single chromatogram TODO: make this more efficient by reducing the copying */ MSChromatogram getChromatogram(Size id) { OpenMS::Interfaces::ChromatogramPtr cptr = indexed_mzml_file_.getChromatogramById(static_cast<int>(id)); MSChromatogram chromatogram(meta_ms_experiment_->getChromatogram(id)); // recreate a chromatogram from the data arrays! 
OpenMS::Interfaces::BinaryDataArrayPtr rt_arr = cptr->getTimeArray(); OpenMS::Interfaces::BinaryDataArrayPtr int_arr = cptr->getIntensityArray(); chromatogram.reserve(rt_arr->data.size()); for (Size i = 0; i < rt_arr->data.size(); i++) { ChromatogramPeakT p; p.setRT(rt_arr->data[i]); p.setIntensity(int_arr->data[i]); chromatogram.push_back(p); } return chromatogram; } /** @brief returns a single chromatogram */ OpenMS::Interfaces::ChromatogramPtr getChromatogramById(Size id) { return indexed_mzml_file_.getChromatogramById(id); } ///sets whether to skip some XML checks and be fast instead void setSkipXMLChecks(bool skip) { indexed_mzml_file_.setSkipXMLChecks(skip); } private: /// Private Assignment operator -> we cannot copy file streams in IndexedMzMLFile OnDiscMSExperiment& operator=(const OnDiscMSExperiment& /* source */); void loadMetaData_(const String& filename) { meta_ms_experiment_ = boost::shared_ptr< PeakMap >(new PeakMap); MzMLFile f; PeakFileOptions options = f.getOptions(); options.setFillData(false); f.setOptions(options); f.load(filename, *meta_ms_experiment_.get()); } protected: /// The filename of the underlying data file String filename_; /// The index of the underlying data file IndexedMzMLFile indexed_mzml_file_; /// The meta-data boost::shared_ptr<PeakMap> meta_ms_experiment_; }; typedef OpenMS::OnDiscMSExperiment OnDiscPeakMap; } // namespace OpenMS #endif // OPENMS_KERNEL_ONDISCMSEXPERIMENT_H
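Following the thread-safety note in the class documentation above, a hedged usage sketch (the helper function and loop body are hypothetical, and the include path is assumed): each OpenMP thread gets its own firstprivate copy of the map, so every thread keeps its own file access state while sharing the parsed meta data through the copied shared_ptr.

#include <OpenMS/KERNEL/OnDiscMSExperiment.h>  // assumed include path

using namespace OpenMS;

void processAllSpectra(OnDiscPeakMap ondisc_map)  // hypothetical helper
{
  const int n = static_cast<int>(ondisc_map.getNrSpectra());
#pragma omp parallel for firstprivate(ondisc_map)
  for (int i = 0; i < n; ++i)
  {
    MSSpectrum s = ondisc_map.getSpectrum(i);  // per-thread copy does the I/O
    // ... work on s ...
  }
}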
Vector_dot_product_mp_reduction.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>

#define n 200000

int main()
{
    /* static: two 200000-element double arrays are too large for most stacks */
    static double a[n], b[n];
    double sum = 0.0;
    double startTime, endTime, execTime; /* omp_get_wtime() returns double */
    int i;

    startTime = omp_get_wtime();

    #pragma omp parallel private(i) shared(a, b) reduction(+:sum)
    {
        /* rand() is not required to be thread-safe; use rand_r() (POSIX)
           with a per-thread seed so the initialisation does not race */
        unsigned int seed = (unsigned int)time(0) ^ (unsigned int)omp_get_thread_num();
        #pragma omp for
        for (i = 0; i < n; i++)
        {
            /* these must be private; as shared scalars they raced */
            double random_a = rand_r(&seed);
            double random_b = rand_r(&seed);
            a[i] = i * random_a;
            b[i] = i * random_b;
            sum = sum + a[i] * b[i]; /* dot product: one product per element;
                                        the original inner j-loop was a bug */
        }
    }

    endTime = omp_get_wtime();
    execTime = endTime - startTime;
    printf("%f \n", execTime);
    return 0;
}
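The parallel region holds a single worksharing loop, so the combined parallel for directive is the more idiomatic spelling of the reduction; a minimal sketch:

#include <omp.h>

/* Combined-construct form of the same reduction (sketch): */
static double dot(const double *a, const double *b, int len)
{
    double sum = 0.0;
    #pragma omp parallel for reduction(+:sum)
    for (int k = 0; k < len; k++)
        sum += a[k] * b[k];
    return sum;
}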
pfmg_setup_rap5.c
/*BHEADER**********************************************************************
 * Copyright (c) 2008, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * This file is part of HYPRE. See file COPYRIGHT for details.
 *
 * HYPRE is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * $Revision: 2.27 $
 ***********************************************************************EHEADER*/

#include "_hypre_struct_ls.h"
#include "pfmg.h"

/*--------------------------------------------------------------------------
 * Macro to "change coordinates". This routine is written as though
 * coarsening is being done in the y-direction. This macro is used to
 * allow for coarsening to be done in the x-direction also.
 *--------------------------------------------------------------------------*/

#define MapIndex(in_index, cdir, out_index) \
   hypre_IndexD(out_index, 2) = hypre_IndexD(in_index, 2); \
   hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
   cdir = (cdir + 1) % 2; \
   hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
   cdir = (cdir + 1) % 2;

/*--------------------------------------------------------------------------
 * hypre_PFMGCreateCoarseOp5
 *    Sets up new coarse grid operator structure. Fine grid
 *    operator is 5pt and so is coarse, i.e. non-Galerkin.
 *--------------------------------------------------------------------------*/

hypre_StructMatrix *
hypre_PFMGCreateCoarseOp5( hypre_StructMatrix *R,
                           hypre_StructMatrix *A,
                           hypre_StructMatrix *P,
                           hypre_StructGrid   *coarse_grid,
                           HYPRE_Int           cdir )
{
   hypre_StructMatrix    *RAP;
   hypre_Index           *RAP_stencil_shape;
   hypre_StructStencil   *RAP_stencil;
   HYPRE_Int              RAP_stencil_size;
   HYPRE_Int              RAP_stencil_dim;
   HYPRE_Int              RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
   hypre_Index            index_temp;
   HYPRE_Int              j, i;
   HYPRE_Int              stencil_rank;

   RAP_stencil_dim = 2;

   /*-----------------------------------------------------------------------
    * Define RAP_stencil
    *-----------------------------------------------------------------------*/

   stencil_rank = 0;

   /*-----------------------------------------------------------------------
    * non-symmetric case
    *-----------------------------------------------------------------------*/

   if (!hypre_StructMatrixSymmetric(A))
   {
      /*--------------------------------------------------------------------
       * 5 point coarse grid stencil
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 5;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 2; j++)
      {
         for (i = -1; i < 2; i++)
         {
            /*--------------------------------------------------------------
             * Storage for 5 elements (c,w,e,n,s)
             *--------------------------------------------------------------*/
            if (i*j == 0)
            {
               hypre_SetIndex(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * symmetric case
    *-----------------------------------------------------------------------*/

   else
   {
      /*--------------------------------------------------------------------
       * 5 point coarse grid stencil
       * Only store the lower triangular part + diagonal = 3 entries,
       * lower triangular means the lower triangular part of the matrix
       * in the standard lexicographic ordering.
       *--------------------------------------------------------------------*/
      RAP_stencil_size = 3;
      RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
      for (j = -1; j < 1; j++)
      {
         for (i = -1; i < 1; i++)
         {
            /*--------------------------------------------------------------
             * Store 3 elements in (c,w,s)
             *--------------------------------------------------------------*/
            if( i*j == 0 )
            {
               hypre_SetIndex(index_temp,i,j,0);
               MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
               stencil_rank++;
            }
         }
      }
   }

   RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim,
                                           RAP_stencil_size,
                                           RAP_stencil_shape);

   RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
                                  coarse_grid, RAP_stencil);

   hypre_StructStencilDestroy(RAP_stencil);

   /*-----------------------------------------------------------------------
    * Coarse operator is symmetric iff fine operator is
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);

   /*-----------------------------------------------------------------------
    * Set number of ghost points - one on each boundary
    *-----------------------------------------------------------------------*/
   hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);

   return RAP;
}

/*--------------------------------------------------------------------------
 * hypre_PFMGBuildCoarseOp5
 *    Sets up new coarse grid operator structure. Fine grid operator is 5pt
 *    and so is coarse, i.e. non-Galerkin.
 *
 *    Uses the non-Galerkin strategy from Ashby & Falgout's original ParFlow
 *    algorithm. For constant_coefficient==2, see [issue663].
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_PFMGBuildCoarseOp5( hypre_StructMatrix *A,
                          hypre_StructMatrix *P,
                          hypre_StructMatrix *R,
                          HYPRE_Int           cdir,
                          hypre_Index         cindex,
                          hypre_Index         cstride,
                          hypre_StructMatrix *RAP )
{
   hypre_Index           index;
   hypre_Index           index_temp;

   hypre_StructGrid     *fgrid;
   hypre_BoxArray       *fgrid_boxes;
   hypre_Box            *fgrid_box;
   HYPRE_Int            *fgrid_ids;
   hypre_StructGrid     *cgrid;
   hypre_BoxArray       *cgrid_boxes;
   hypre_Box            *cgrid_box;
   HYPRE_Int            *cgrid_ids;
   hypre_IndexRef        cstart, bfstart, stridef;
   hypre_Index           fstart, bcstart, stridec;
   hypre_Index           loop_size;

   HYPRE_Int             constant_coefficient;

   HYPRE_Int             fi, ci, fbi;

   hypre_Box            *A_dbox;
   hypre_Box            *P_dbox;
   hypre_Box            *RAP_dbox;

   hypre_BoxArray       *bdy_boxes, *tmp_boxes;
   hypre_Box            *bdy_box, *fcbox;

   double               *pb, *pa;

   double               *a_cc, *a_cw, *a_ce, *a_cb, *a_ca;

   double               *rap_cc, *rap_cw, *rap_ce;
   double               *rap_cb, *rap_ca;

   double                west, east;
   double                center_int, center_bdy;

   HYPRE_Int             iA, iAm1, iAp1;
   HYPRE_Int             iAc;
   HYPRE_Int             iP, iPm1, iPp1;

   HYPRE_Int             OffsetA;
   HYPRE_Int             OffsetP;

   stridef = cstride;
   hypre_SetIndex(stridec, 1, 1, 1);

   fgrid = hypre_StructMatrixGrid(A);
   fgrid_boxes = hypre_StructGridBoxes(fgrid);
   fgrid_ids = hypre_StructGridIDs(fgrid);

   cgrid = hypre_StructMatrixGrid(RAP);
   cgrid_boxes = hypre_StructGridBoxes(cgrid);
   cgrid_ids = hypre_StructGridIDs(cgrid);

   constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
   hypre_assert( hypre_StructMatrixConstantCoefficient(A) == constant_coefficient );
   if ( constant_coefficient==0 )
   {
      hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 0 );
      hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 0 );
   }
   else /* 1 or 2 */
   {
      hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 1 );
      hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 1 );
   }

   fcbox = hypre_BoxCreate();
   bdy_boxes = hypre_BoxArrayCreate(0);
   tmp_boxes = hypre_BoxArrayCreate(0);

   fi = 0;
   hypre_ForBoxI(ci, cgrid_boxes)
   {
      while (fgrid_ids[fi] != cgrid_ids[ci])
      {
         fi++;
      }

      cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
      fgrid_box = hypre_BoxArrayBox(fgrid_boxes, fi);

      cstart = hypre_BoxIMin(cgrid_box);
      hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);

      A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
      P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
      RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);

      /*-----------------------------------------------------------------
       * Extract pointers for interpolation operator:
       * pb is pointer for weight for f-point below c-point
       * pa is pointer for weight for f-point above c-point
       *-----------------------------------------------------------------*/

      hypre_SetIndex(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);

      hypre_SetIndex(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
         hypre_BoxOffsetDistance(P_dbox, index);

      /*-----------------------------------------------------------------
       * Extract pointers for 5-point fine grid operator:
       *
       * a_cc is pointer for center coefficient
       * a_cw is pointer for west coefficient
       * a_ce is pointer for east coefficient
       * a_cb is pointer for below coefficient
       * a_ca is pointer for above coefficient
       *-----------------------------------------------------------------*/

      hypre_SetIndex(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      a_cb = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      hypre_SetIndex(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      a_ca = hypre_StructMatrixExtractPointerByIndex(A, fi, index);

      /*-----------------------------------------------------------------
       * Extract pointers for coarse grid operator
       * rap_cc is pointer for center coefficient (etc.)
       *-----------------------------------------------------------------*/

      hypre_SetIndex(index_temp,0,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex(index_temp,-1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex(index_temp,1,0,0);
      MapIndex(index_temp, cdir, index);
      rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex(index_temp,0,-1,0);
      MapIndex(index_temp, cdir, index);
      rap_cb = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      hypre_SetIndex(index_temp,0,1,0);
      MapIndex(index_temp, cdir, index);
      rap_ca = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);

      /*-----------------------------------------------------------------
       * Define offsets for fine grid stencil and interpolation
       *
       * In the BoxLoop below I assume iA and iP refer to data associated
       * with the point which we are building the stencil for. The offsets
       * below are used in referring to data associated with other points.
*-----------------------------------------------------------------*/ hypre_SetIndex(index_temp,0,1,0); MapIndex(index_temp, cdir, index); OffsetP = hypre_BoxOffsetDistance(P_dbox,index); OffsetA = hypre_BoxOffsetDistance(A_dbox,index); /*-------------------------------------------------------------- * Loop for symmetric 5-point fine grid operator; produces a * symmetric 5-point coarse grid operator. *--------------------------------------------------------------*/ if ( constant_coefficient==0 ) { hypre_BoxGetSize(cgrid_box, loop_size); hypre_BoxLoop3Begin(hypre_StructMatrixDim(A), loop_size, P_dbox, cstart, stridec, iP, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iA,iAc,iAm1,iAp1,iPm1,iPp1,west,east) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop3For(iP, iA, iAc) { iAm1 = iA - OffsetA; iAp1 = iA + OffsetA; iPm1 = iP - OffsetP; iPp1 = iP + OffsetP; rap_cb[iAc] = a_cb[iA] * pa[iPm1]; rap_ca[iAc] = a_ca[iA] * pb[iPp1]; west = a_cw[iA] + 0.5 * a_cw[iAm1] + 0.5 * a_cw[iAp1]; east = a_ce[iA] + 0.5 * a_ce[iAm1] + 0.5 * a_ce[iAp1]; /*----------------------------------------------------- * Prevent non-zero entries reaching off grid *-----------------------------------------------------*/ if(a_cw[iA] == 0.0) west = 0.0; if(a_ce[iA] == 0.0) east = 0.0; rap_cw[iAc] = west; rap_ce[iAc] = east; rap_cc[iAc] = a_cc[iA] + a_cw[iA] + a_ce[iA] + a_cb[iA] * pb[iP] + a_ca[iA] * pa[iP] - west - east; } hypre_BoxLoop3End(iP, iA, iAc); } else if ( constant_coefficient==1 ) { rap_cb[0] = rap_ca[0] = a_cb[0] * pa[0]; rap_cw[0] = rap_ce[0] = 2.0*a_cw[0]; rap_cc[0] = a_cc[0] - 2.0*( a_cw[0] - rap_cb[0] ); } else if ( constant_coefficient==2 ) { /* NOTE: This does not reduce to either of the above operators unless * the row sum is zero and the interpolation weights are 1/2 */ rap_cb[0] = rap_ca[0] = 0.5*a_cb[0]; rap_cw[0] = rap_ce[0] = 2.0*a_cw[0]; center_int = 3.0*a_cb[0]; center_bdy = 0.5*a_cb[0] + (a_cw[0] + a_cb[0]); hypre_BoxGetSize(cgrid_box, loop_size); hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size, A_dbox, fstart, stridef, iA, RAP_dbox, cstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(iA, iAc) { rap_cc[iAc] = 2.0*a_cc[iA] + center_int; } hypre_BoxLoop2End(iA, iAc); hypre_CopyBox(cgrid_box, fcbox); hypre_StructMapCoarseToFine(hypre_BoxIMin(fcbox), cindex, cstride, hypre_BoxIMin(fcbox)); hypre_StructMapCoarseToFine(hypre_BoxIMax(fcbox), cindex, cstride, hypre_BoxIMax(fcbox)); hypre_BoxArraySetSize(bdy_boxes, 0); if (hypre_BoxIMinD(fcbox, cdir) == hypre_BoxIMinD(fgrid_box, cdir)) { hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, -1, bdy_boxes); } if (hypre_BoxIMaxD(fcbox, cdir) == hypre_BoxIMaxD(fgrid_box, cdir)) { hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, 1, tmp_boxes); hypre_AppendBoxArray(tmp_boxes, bdy_boxes); } hypre_ForBoxI(fbi, bdy_boxes) { bdy_box = hypre_BoxArrayBox(bdy_boxes, fbi); hypre_BoxGetSize(bdy_box, loop_size); bfstart = hypre_BoxIMin(bdy_box); hypre_StructMapFineToCoarse(bfstart, cindex, cstride, bcstart); hypre_BoxLoop2Begin(hypre_StructMatrixDim(A), loop_size, A_dbox, bfstart, stridef, iA, RAP_dbox, bcstart, stridec, iAc); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE #endif hypre_BoxLoop2For(iA, iAc) { rap_cc[iAc] -= 0.5*a_cc[iA] + center_bdy; } hypre_BoxLoop2End(iA, iAc); } } } /* end ForBoxI */ hypre_BoxDestroy(fcbox); 
hypre_BoxArrayDestroy(bdy_boxes);
   hypre_BoxArrayDestroy(tmp_boxes);

   return hypre_error_flag;
}
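/*
 * A minimal standalone sketch (not hypre code; all names below are
 * illustrative) of the interior stencil collapse performed in the
 * variable-coefficient BoxLoop above, for one coarse point, assuming
 * constant interpolation weights pa = pb = 0.5 so that pa[iPm1], pa[iP],
 * and pb[iPp1] coincide. With the 5-point Laplacian values below it
 * reproduces the constant_coefficient==1 branch: west = east = 2*a_cw and
 * rap_cc = a_cc - 2*(a_cw - rap_cb).
 */
#include <stdio.h>

int main(void)
{
   /* fine-grid coefficients at the C-point (iA) and the west/east
      coefficients at its neighbors below (iAm1) and above (iAp1) */
   double a_cc = 4.0, a_cw = -1.0, a_ce = -1.0, a_cb = -1.0, a_ca = -1.0;
   double a_cw_m1 = -1.0, a_cw_p1 = -1.0;
   double a_ce_m1 = -1.0, a_ce_p1 = -1.0;
   double pa = 0.5, pb = 0.5;   /* interpolation weights (assumed constant) */

   double rap_cb = a_cb * pa;
   double rap_ca = a_ca * pb;
   double west   = a_cw + 0.5 * a_cw_m1 + 0.5 * a_cw_p1;
   double east   = a_ce + 0.5 * a_ce_m1 + 0.5 * a_ce_p1;
   double rap_cc = a_cc + a_cw + a_ce + a_cb * pb + a_ca * pa - west - east;

   printf("rap: cc=%g cw=%g ce=%g cb=%g ca=%g\n",
          rap_cc, west, east, rap_cb, rap_ca);
   return 0;
}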
utils.h
#pragma once #ifdef WIN32 #define _USE_MATH_DEFINES #include <math.h> #else #include <math.h> #endif #include <float.h> #include <omp.h> #include <string.h> #include <chrono> #include <memory> #include <mutex> #include <random> #include <vector> #include <open3d/geometry/Geometry.h> #include <open3d/geometry/PointCloud.h> #include <open3d/pipelines/registration/Feature.h> #include <Eigen/Dense> namespace misc3d { typedef std::shared_ptr<open3d::geometry::Geometry> GeometryPtr; typedef std::shared_ptr<open3d::geometry::PointCloud> PointCloudPtr; typedef std::shared_ptr<open3d::geometry::TriangleMesh> TriangleMeshPtr; typedef std::shared_ptr<open3d::pipelines::registration::Feature> FeaturePtr; /** * @brief Timer for duration measurement. * */ class Timer { public: void Start() { t0_ = std::chrono::high_resolution_clock::now(); } double Stop() { const double timestamp = std::chrono::duration<double>( std::chrono::high_resolution_clock::now() - t0_) .count(); return timestamp; } private: std::chrono::high_resolution_clock::time_point t0_; }; /** * @brief base sampler class * * @tparam T */ template <typename T> class Sampler { public: /** * @brief pure virtual operator, which define the I/O of this sampler * * @param sample_size * @return std::vector<T> */ virtual std::vector<T> operator()(size_t sample_size) = 0; }; /** * @brief Extract a random sample of given sample_size from the input indices * * @tparam T */ template <typename T> class RandomSampler : public Sampler<T> { public: explicit RandomSampler(const size_t size) : Sampler<T>(), size_(size) { std::random_device rd; rng_ = std::mt19937(rd()); } // This operator is usually used in for loop and sample a small subset from // original indices std::vector<T> operator()(size_t sample_size) override { // Lock this operation when using OpenMP to ensure synchronization std::lock_guard<std::mutex> guard(mutex_); std::vector<T> sample; sample.reserve(sample_size); size_t valid_sample = 0; while (valid_sample < sample_size) { size_t idx = rng_() % size_; if (std::find(sample.begin(), sample.end(), idx) == sample.end()) { sample.push_back(idx); valid_sample++; } } return sample; } // This function is usually called once to sample more than half of original // indices std::vector<T> SampleWithoutDuplicate(size_t sample_size) { std::vector<T> indices(size_); std::iota(indices.begin(), indices.end(), 0); for (size_t i = 0; i < sample_size; ++i) { std::swap(indices[i], indices[rng_() % size_]); } std::vector<T> sample; sample.reserve(sample_size); for (int idx = 0; idx < sample_size; ++idx) { sample.push_back(indices[idx]); } return sample; } private: size_t size_; std::mt19937 rng_; std::mutex mutex_; }; /** * @brief perfoem normal consistent, here we have assumption that the point * clouds are all in camera coordinate * * @param pc */ inline void NormalConsistent(open3d::geometry::PointCloud &pc) { if (!pc.HasNormals()) { return; } else { const int size = pc.points_.size(); #pragma omp parallel for for (int i = 0; i < size; i++) { if (pc.points_[i].dot(pc.normals_[i]) > 0) { pc.normals_[i] *= -1; } pc.normals_[i].normalize(); } } } /** * @brief extract data by indices * * @param src * @param index * @param dst */ template <typename T> inline void GetVectorByIndex(const std::vector<T> &src, const std::vector<size_t> &index, std::vector<T> &dst) { const size_t num = index.size(); dst.resize(num); #pragma omp parallel for for (int i = 0; i < num; i++) { dst[i] = src[index[i]]; } } /** * @brief Get Eigen matrix from vector * * @param src * @param 
index * @param dst */ template <typename T> inline void GetMatrixByIndex(const std::vector<Eigen::Matrix<T, 3, 1>> &src, const std::vector<size_t> &index, Eigen::Matrix<T, 3, Eigen::Dynamic> &dst) { const size_t num = index.size(); dst.setZero(3, num); if (src.size() == 0) { return; } #pragma omp parallel for for (int i = 0; i < num; i++) { dst.col(i) = src[index[i]]; } } /** * @brief data conversion * * @param pc * @param new_pc */ template <typename T> inline void EigenMatrixToVector(const Eigen::Matrix<T, Eigen::Dynamic, 3> &pc, std::vector<Eigen::Matrix<T, 3, 1>> &new_pc) { const size_t num = pc.rows(); const size_t data_length = sizeof(T) * 3; new_pc.resize(num); #pragma omp parallel for for (int i = 0; i < num; i++) { const Eigen::Matrix<T, 3, 1> &p = pc.row(i); memcpy(new_pc[i].data(), p.data(), data_length); } } /** * @brief data conversion * * @param pc * @param normal * @param new_pc */ template <typename T> inline void EigenMatrixToVector( const Eigen::Matrix<T, Eigen::Dynamic, 3> &pc, const Eigen::Matrix<T, Eigen::Dynamic, 3> &normal, std::vector<Eigen::Matrix<T, 6, 1>> &new_pc) { const size_t num = pc.rows(); const size_t data_length = sizeof(T) * 3; new_pc.resize(num); #pragma omp parallel for for (int i = 0; i < num; i++) { const Eigen::Matrix<T, 3, 1> &p = pc.row(i); const Eigen::Matrix<T, 3, 1> &n = normal.row(i); memcpy(new_pc[i].data(), p.data(), data_length); memcpy(new_pc[i].data() + 3, n.data(), data_length); } } /** * @brief data conversion * * @param pc * @param new_pc */ template <typename T> inline void VectorToEigenMatrix(const std::vector<Eigen::Matrix<T, 3, 1>> &pc, Eigen::Matrix<T, 3, Eigen::Dynamic> &new_pc) { const size_t num = pc.size(); new_pc.setZero(3, num); #pragma omp parallel for for (int i = 0; i < num; i++) { new_pc.col(i) = pc[i]; } } /** * @brief data conversion * * @param pc * @param new_pc */ template <typename T> inline void VectorToEigenMatrix(const std::vector<Eigen::Matrix<T, 6, 1>> &pc, Eigen::Matrix<T, 6, Eigen::Dynamic> &new_pc) { const size_t num = pc.size(); new_pc.setZero(6, num); #pragma omp parallel for for (int i = 0; i < num; i++) { new_pc.col(i) = pc[i]; } } /** * @brief Compute the coordinate transformation between the target coordinate * and origin coordinate * * @param x_head Point at target coordinate x-axis * @param origin Point at target coordinate origin * @param ref Point at target coordinate x-y plane * @return Eigen::Matrix<T, 4, 4> */ template <typename T> inline Eigen::Matrix<T, 4, 4> CalcCoordinateTransform( const Eigen::Matrix<T, 3, 1> &x_head, const Eigen::Matrix<T, 3, 1> &origin, const Eigen::Matrix<T, 3, 1> &ref) { const Eigen::Matrix<T, 3, 1> x_axis = (x_head - origin) / (x_head - origin).norm(); const Eigen::Matrix<T, 3, 1> tmp_axis = (ref - origin) / (ref - origin).norm(); Eigen::Matrix<T, 3, 1> z_axis = x_axis.cross(tmp_axis); if (z_axis.dot(Eigen::Matrix<T, 3, 1>(0, 0, 1)) > 0) { z_axis /= z_axis.norm(); } else { z_axis /= -z_axis.norm(); } Eigen::Matrix<T, 3, 1> y_axis = z_axis.cross(x_axis); y_axis /= y_axis.norm(); Eigen::Matrix<T, 4, 4> transform; transform << x_axis(0), y_axis(0), z_axis(0), origin(0), x_axis(1), y_axis(1), z_axis(1), origin(1), x_axis(2), y_axis(2), z_axis(2), origin(2), 0, 0, 0, 1; return transform; } /** * @brief compute point to line distance * * @tparam T * @param query * @param point1 * @param point2 * @return T */ template <typename T> inline T CalcPoint2LineDistance(const Eigen::Matrix<T, 3, 1> &query, const Eigen::Matrix<T, 3, 1> &point1, const Eigen::Matrix<T, 3, 1> &point2) { 
const Eigen::Matrix<T, 3, 1> a = query - point1; const Eigen::Matrix<T, 3, 1> b = query - point2; const Eigen::Matrix<T, 3, 1> c = point2 - point1; return a.cross(b).norm() / c.norm(); } /** * @brief convert degree to radian * * @param angle_deg * @return double */ template <typename T> inline T Deg2Rad(const T angle_deg) { return angle_deg / 180 * M_PI; } /** * @brief convert radian to degree * * @param angle_rad * @return double */ template <typename T> inline T Rad2Deg(const T angle_rad) { return angle_rad / M_PI * 180; } /** * @brief data type conversion * * @param pc * @param new_pc */ inline void VectorToO3dPointCloud(const std::vector<Eigen::Vector6d> &pc, open3d::geometry::PointCloud &new_pc) { const int n_pt = pc.size(); const size_t data_length = sizeof(double) * 3; new_pc.points_.resize(n_pt); new_pc.normals_.resize(n_pt); #pragma omp parallel for for (int i = 0; i < n_pt; i++) { memcpy(new_pc.points_[i].data(), pc[i].data(), data_length); memcpy(new_pc.normals_[i].data(), pc[i].data() + 3, data_length); } } /** * @brief Convert 4x4 Mat to 16 array * * @tparam T * @param mat * @param array */ template <typename T> inline void EigenMat4x4ToArray(const Eigen::Matrix<T, 4, 4> &mat, std::array<T, 16> &array) { for (size_t i = 0; i < 4; i++) { for (size_t j = 0; j < 4; j++) { array[i * 4 + j] = mat(i, j); } } } /** * @brief Convert 16 array to 4x4 Mat * * @tparam T * @param array * @param mat */ template <typename T> inline void ArrayToEigenMat4x4(const std::array<T, 16> &array, Eigen::Matrix<T, 4, 4> &mat) { for (size_t i = 0; i < 4; i++) { for (size_t j = 0; j < 4; j++) { mat(i, j) = array[i * 4 + j]; } } } /** * @brief Get new eigen matrix by indices * * @param mat * @param index * @return Eigen::Matrix3Xd */ inline Eigen::Matrix3Xd SelectByIndexEigenMat(const Eigen::Matrix3Xd &mat, const std::vector<size_t> &indices) { Eigen::Matrix3Xd new_mat; new_mat.resize(3, indices.size()); #pragma omp parallel for for (int i = 0; i < indices.size(); i++) { new_mat.col(i) = mat.col(indices[i]); } return new_mat; } } // namespace misc3d
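/*
 * A short usage sketch for the samplers above (illustrative only; the
 * include path below is an assumption): operator() is the per-iteration
 * RANSAC-style draw that rejects duplicates one index at a time, while
 * SampleWithoutDuplicate shuffles a full index table and is meant for a
 * single bulk draw of more than half of the indices.
 */
#include <iostream>
#include <vector>
#include "misc3d/utils.h"  // the header above; path is an assumption

void sampler_sketch() {
    const size_t n_points = 1000;
    misc3d::RandomSampler<size_t> sampler(n_points);

    // small subset: duplicate-rejecting draw, mutex-guarded for OpenMP loops
    const std::vector<size_t> subset = sampler(8);

    // bulk subset: swap-based shuffle of 0..n_points-1, then a prefix copy
    const std::vector<size_t> half = sampler.SampleWithoutDuplicate(n_points / 2);

    std::cout << subset.size() << " + " << half.size() << " indices sampled\n";
}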
local_estimator.h
/* * estimator.h * * Created on: Mar 22, 2012 * Author: aitor */ #ifndef REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ #define REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ #include <pcl/apps/3d_rec_framework/feature_wrapper/normal_estimator.h> #include <pcl/keypoints/uniform_sampling.h> #include <pcl/surface/mls.h> #include <pcl/keypoints/harris_3d.h> #include <pcl/keypoints/sift_keypoint.h> #include <pcl/keypoints/susan.h> namespace pcl { template <> struct SIFTKeypointFieldSelector<PointXYZ> { inline float operator()(const PointXYZ &p) const { return p.z; } }; } // namespace pcl namespace pcl { namespace rec_3d_framework { template <typename PointInT> class KeypointExtractor { protected: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; typedef typename pcl::PointCloud<PointInT>::Ptr PointOutTPtr; typename pcl::PointCloud<PointInT>::Ptr input_; float radius_; public: void setInputCloud(PointInTPtr &input) { input_ = input; } void setSupportRadius(float f) { radius_ = f; } virtual void compute(PointOutTPtr &keypoints) = 0; virtual void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr & /*normals*/) {} virtual bool needNormals() { return false; } }; template <typename PointInT> class UniformSamplingExtractor : public KeypointExtractor<PointInT> { private: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; bool filter_planar_; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; float sampling_density_; boost::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_; boost::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_; void filterPlanar(PointInTPtr &input, pcl::PointCloud<int> &keypoints_cloud) { pcl::PointCloud<int> filtered_keypoints; // create a search object typename pcl::search::Search<PointInT>::Ptr tree; if (input->isOrganized()) tree.reset(new pcl::search::OrganizedNeighbor<PointInT>()); else tree.reset(new pcl::search::KdTree<PointInT>(false)); tree->setInputCloud(input); neighborhood_indices_.reset(new std::vector<std::vector<int>>); neighborhood_indices_->resize(keypoints_cloud.points.size()); neighborhood_dist_.reset(new std::vector<std::vector<float>>); neighborhood_dist_->resize(keypoints_cloud.points.size()); filtered_keypoints.points.resize(keypoints_cloud.points.size()); int good = 0; for (size_t i = 0; i < keypoints_cloud.points.size(); i++) { if (tree->radiusSearch(keypoints_cloud[i], radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good])) { EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix; Eigen::Vector4f xyz_centroid; EIGEN_ALIGN16 Eigen::Vector3f eigenValues; EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors; // compute planarity of the region computeMeanAndCovarianceMatrix(*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid); pcl::eigen33(covariance_matrix, eigenVectors, eigenValues); float eigsum = eigenValues.sum(); if (!pcl_isfinite(eigsum)) { PCL_ERROR("Eigen sum is not finite\n"); } if ((fabs(eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs(eigenValues[0] / eigsum) > 1.e-2)) { // region is not planar, add to filtered keypoint keypoints_cloud.points[good] = keypoints_cloud.points[i]; good++; } } } neighborhood_indices_->resize(good); neighborhood_dist_->resize(good); keypoints_cloud.points.resize(good); neighborhood_indices_->clear(); neighborhood_dist_->clear(); } public: void setFilterPlanar(bool b) { filter_planar_ = b; } void setSamplingDensity(float f) { sampling_density_ = f; } void compute(PointInTPtr &keypoints) { keypoints.reset(new 
pcl::PointCloud<PointInT>); pcl::UniformSampling<PointInT> keypoint_extractor; keypoint_extractor.setRadiusSearch(sampling_density_); keypoint_extractor.setInputCloud(input_); pcl::PointCloud<int> keypoints_idxes; keypoint_extractor.compute(keypoints_idxes); if (filter_planar_) filterPlanar(input_, keypoints_idxes); std::vector<int> indices; indices.resize(keypoints_idxes.points.size()); for (size_t i = 0; i < indices.size(); i++) indices[i] = keypoints_idxes.points[i]; pcl::copyPointCloud(*input_, indices, *keypoints); } }; template <typename PointInT> class SIFTKeypointExtractor : public KeypointExtractor<PointInT> { typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; public: void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SIFTKeypoint<PointInT, pcl::PointXYZI> sift3D; sift3D.setScales(0.003f, 3, 2); sift3D.setMinimumContrast(0.1f); sift3D.setInputCloud(input_); sift3D.setSearchSurface(input_); sift3D.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT> class SIFTSurfaceKeypointExtractor : public KeypointExtractor<PointInT> { typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; pcl::PointCloud<pcl::Normal>::Ptr normals_; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } public: void compute(PointInTPtr &keypoints) { if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("SIFTSurfaceKeypointExtractor -- Normals are not valid\n"); keypoints.reset(new pcl::PointCloud<PointInT>); typename pcl::PointCloud<pcl::PointNormal>::Ptr input_cloud( new pcl::PointCloud<pcl::PointNormal>); input_cloud->width = input_->width; input_cloud->height = input_->height; input_cloud->points.resize(input_->width * input_->height); for (size_t i = 0; i < input_->points.size(); i++) { input_cloud->points[i].getVector3fMap() = input_->points[i].getVector3fMap(); input_cloud->points[i].getNormalVector3fMap() = normals_->points[i].getNormalVector3fMap(); } typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SIFTKeypoint<pcl::PointNormal, pcl::PointXYZI> sift3D; sift3D.setScales(0.003f, 3, 2); sift3D.setMinimumContrast(0.0); sift3D.setInputCloud(input_cloud); sift3D.setSearchSurface(input_cloud); sift3D.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT, typename NormalT = pcl::Normal> class HarrisKeypointExtractor : public KeypointExtractor<PointInT> { pcl::PointCloud<pcl::Normal>::Ptr normals_; typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m_; float non_max_radius_; float threshold_; public: HarrisKeypointExtractor() { m_ = pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::HARRIS; non_max_radius_ = 0.01f; threshold_ = 0.f; } bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } void setThreshold(float t) { threshold_ = t; } void setResponseMethod( 
typename pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI>::ResponseMethod m) { m_ = m; } void setNonMaximaRadius(float r) { non_max_radius_ = r; } void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("HarrisKeypointExtractor -- Normals are not valid\n"); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::HarrisKeypoint3D<PointInT, pcl::PointXYZI> harris; harris.setNonMaxSupression(true); harris.setRefine(false); harris.setThreshold(threshold_); harris.setInputCloud(input_); harris.setNormals(normals_); harris.setRadius(non_max_radius_); harris.setRadiusSearch(non_max_radius_); harris.setMethod(m_); harris.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT, typename NormalT = pcl::Normal> class SUSANKeypointExtractor : public KeypointExtractor<PointInT> { pcl::PointCloud<pcl::Normal>::Ptr normals_; typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; using KeypointExtractor<PointInT>::input_; using KeypointExtractor<PointInT>::radius_; public: SUSANKeypointExtractor() {} bool needNormals() { return true; } void setNormals(const pcl::PointCloud<pcl::Normal>::Ptr &normals) { normals_ = normals; } void compute(PointInTPtr &keypoints) { keypoints.reset(new pcl::PointCloud<PointInT>); if (normals_ == 0 || (normals_->points.size() != input_->points.size())) PCL_WARN("SUSANKeypointExtractor -- Normals are not valid\n"); typename pcl::PointCloud<pcl::PointXYZI>::Ptr intensity_keypoints( new pcl::PointCloud<pcl::PointXYZI>); pcl::SUSAN<PointInT, pcl::PointXYZI> susan; susan.setNonMaxSupression(true); susan.setInputCloud(input_); susan.setNormals(normals_); susan.setRadius(0.01f); susan.setRadiusSearch(0.01f); susan.compute(*intensity_keypoints); pcl::copyPointCloud(*intensity_keypoints, *keypoints); } }; template <typename PointInT, typename FeatureT> class LocalEstimator { protected: typedef typename pcl::PointCloud<PointInT>::Ptr PointInTPtr; typedef typename pcl::PointCloud<FeatureT>::Ptr FeatureTPtr; typename boost::shared_ptr< PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> normal_estimator_; // typename boost::shared_ptr<UniformSampling<PointInT> > // keypoint_extractor_; std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT>>> keypoint_extractor_; // this should be a vector float support_radius_; // bool filter_planar_; bool adaptative_MLS_; boost::shared_ptr<std::vector<std::vector<int>>> neighborhood_indices_; boost::shared_ptr<std::vector<std::vector<float>>> neighborhood_dist_; // std::vector< std::vector<int> > neighborhood_indices_; // std::vector< std::vector<float> > neighborhood_dist_; void computeKeypoints(PointInTPtr &cloud, PointInTPtr &keypoints, pcl::PointCloud<pcl::Normal>::Ptr &normals) { keypoints.reset(new pcl::PointCloud<PointInT>); for (size_t i = 0; i < keypoint_extractor_.size(); i++) { keypoint_extractor_[i]->setInputCloud(cloud); if (keypoint_extractor_[i]->needNormals()) keypoint_extractor_[i]->setNormals(normals); keypoint_extractor_[i]->setSupportRadius(support_radius_); PointInTPtr detected_keypoints; keypoint_extractor_[i]->compute(detected_keypoints); *keypoints += *detected_keypoints; } } public: LocalEstimator() { adaptative_MLS_ = false; keypoint_extractor_.clear(); } void setAdaptativeMLS(bool b) { adaptative_MLS_ = b; } virtual bool estimate(PointInTPtr &in, PointInTPtr &processed, PointInTPtr 
&keypoints, FeatureTPtr &signatures) = 0; void setNormalEstimator( boost::shared_ptr<PreProcessorAndNormalEstimator<PointInT, pcl::Normal>> &ne) { normal_estimator_ = ne; } /** * \brief Right now only uniformSampling keypoint extractor is allowed */ void addKeypointExtractor(boost::shared_ptr<KeypointExtractor<PointInT>> &ke) { keypoint_extractor_.push_back(ke); } void setKeypointExtractors( std::vector<typename boost::shared_ptr<KeypointExtractor<PointInT>>> &ke) { keypoint_extractor_ = ke; } void setSupportRadius(float r) { support_radius_ = r; } /*void setFilterPlanar (bool b) { filter_planar_ = b; } void filterPlanar (PointInTPtr & input, KeypointCloud & keypoints_cloud) { pcl::PointCloud<int> filtered_keypoints; //create a search object typename pcl::search::Search<PointInT>::Ptr tree; if (input->isOrganized ()) tree.reset (new pcl::search::OrganizedNeighbor<PointInT> ()); else tree.reset (new pcl::search::KdTree<PointInT> (false)); tree->setInputCloud (input); //std::vector<int> nn_indices; //std::vector<float> nn_distances; neighborhood_indices_.reset (new std::vector<std::vector<int> >); neighborhood_indices_->resize (keypoints_cloud.points.size ()); neighborhood_dist_.reset (new std::vector<std::vector<float> >); neighborhood_dist_->resize (keypoints_cloud.points.size ()); filtered_keypoints.points.resize (keypoints_cloud.points.size ()); int good = 0; //#pragma omp parallel for num_threads(8) for (size_t i = 0; i < keypoints_cloud.points.size (); i++) { if (tree->radiusSearch (keypoints_cloud[i], support_radius_, (*neighborhood_indices_)[good], (*neighborhood_dist_)[good])) { EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix; Eigen::Vector4f xyz_centroid; EIGEN_ALIGN16 Eigen::Vector3f eigenValues; EIGEN_ALIGN16 Eigen::Matrix3f eigenVectors; //compute planarity of the region computeMeanAndCovarianceMatrix (*input, (*neighborhood_indices_)[good], covariance_matrix, xyz_centroid); pcl::eigen33 (covariance_matrix, eigenVectors, eigenValues); float eigsum = eigenValues.sum (); if (!pcl_isfinite(eigsum)) { PCL_ERROR("Eigen sum is not finite\n"); } if ((fabs (eigenValues[0] - eigenValues[1]) < 1.5e-4) || (eigsum != 0 && fabs (eigenValues[0] / eigsum) > 1.e-2)) { //region is not planar, add to filtered keypoint keypoints_cloud.points[good] = keypoints_cloud.points[i]; good++; } } } neighborhood_indices_->resize (good); neighborhood_dist_->resize (good); keypoints_cloud.points.resize (good); }*/ }; } // namespace rec_3d_framework } // namespace pcl #endif /* REC_FRAMEWORK_LOCAL_ESTIMATOR_H_ */
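/*
 * A usage sketch for the extractor interface above (assumes PCL and the
 * header above are included; the numeric values are placeholders, not
 * recommendations): configure a UniformSamplingExtractor through the
 * KeypointExtractor base API and run it on an input cloud.
 */
void keypoint_sketch(pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud) {
  typedef pcl::rec_3d_framework::UniformSamplingExtractor<pcl::PointXYZ>
      Extractor;
  boost::shared_ptr<Extractor> us(new Extractor);
  us->setSamplingDensity(0.01f);  // uniform-sampling radius (placeholder)
  us->setFilterPlanar(true);      // drop keypoints in planar neighborhoods
  us->setSupportRadius(0.04f);    // radius for the planarity covariance test
  us->setInputCloud(cloud);

  pcl::PointCloud<pcl::PointXYZ>::Ptr keypoints;
  us->compute(keypoints);         // allocates 'keypoints' and fills it
}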
ast-dump-openmp-begin-declare-variant_6.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s // expected-no-diagnostics int also_before(void) { return 0; } #pragma omp begin declare variant match(implementation={vendor(ibm)}) int also_after(void) { return 1; } int also_before(void) { return 2; } #pragma omp end declare variant int also_after(void) { return 0; } int main() { // Should return 0. return also_after() + also_before(); } // Make sure: // - we see the specialization in the AST // - we do use the original pointers for the calls as the variants are not applicable (this is not the ibm compiler). // CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_4:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_5:0x[a-z0-9]*]] <line:13:1> 'int ({{.*}})' Function [[ADDR_6:0x[a-z0-9]*]] 'also_before[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] <line:10:1, col:20> col:5 implicit used also_after 'int ({{.*}})' // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_8:0x[a-z0-9]*]] <<invalid sloc>> Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9:0x[a-z0-9]*]] <col:1> 'int ({{.*}})' Function [[ADDR_10:0x[a-z0-9]*]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: |-FunctionDecl [[ADDR_10]] <col:1, line:12:1> line:10:1 also_after[implementation={vendor(ibm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_11:0x[a-z0-9]*]] <col:22, line:12:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_12:0x[a-z0-9]*]] <line:11:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_13:0x[a-z0-9]*]] <col:10> 'int' 1 // CHECK-NEXT: |-FunctionDecl [[ADDR_6]] <line:13:1, line:15:1> line:13:1 also_before[implementation={vendor(ibm)}] 'int ({{.*}})' // CHECK-NEXT: | `-CompoundStmt [[ADDR_14:0x[a-z0-9]*]] <col:23, line:15:1> // CHECK-NEXT: | `-ReturnStmt [[ADDR_15:0x[a-z0-9]*]] <line:14:3, col:10> // CHECK-NEXT: | `-IntegerLiteral [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int' 2 // CHECK-NEXT: |-FunctionDecl [[ADDR_17:0x[a-z0-9]*]] prev [[ADDR_7]] <line:18:1, line:20:1> line:18:5 used also_after 'int ({{.*}})' // CHECK-NEXT: | |-CompoundStmt [[ADDR_18:0x[a-z0-9]*]] <col:22, line:20:1> // CHECK-NEXT: | | `-ReturnStmt [[ADDR_19:0x[a-z0-9]*]] <line:19:3, col:10> // CHECK-NEXT: | | `-IntegerLiteral [[ADDR_20:0x[a-z0-9]*]] <col:10> 'int' 0 // CHECK-NEXT: | `-OMPDeclareVariantAttr [[ADDR_21:0x[a-z0-9]*]] <<invalid sloc>> Inherited Implicit implementation={vendor(ibm)} // CHECK-NEXT: | `-DeclRefExpr [[ADDR_9]] <line:10:1> 'int ({{.*}})' Function [[ADDR_10]] 'also_after[implementation={vendor(ibm)}]' 'int ({{.*}})' // CHECK-NEXT: `-FunctionDecl [[ADDR_22:0x[a-z0-9]*]] <line:22:1, line:25:1> line:22:5 main 'int ({{.*}})' // CHECK-NEXT: `-CompoundStmt [[ADDR_23:0x[a-z0-9]*]] <col:12, line:25:1> // CHECK-NEXT: `-ReturnStmt [[ADDR_24:0x[a-z0-9]*]] <line:24:3, col:37> // CHECK-NEXT: `-BinaryOperator [[ADDR_25:0x[a-z0-9]*]] <col:10, col:37> 'int' '+' // CHECK-NEXT: |-CallExpr [[ADDR_26:0x[a-z0-9]*]] <col:10, col:21> 'int' // CHECK-NEXT: | `-ImplicitCastExpr 
[[ADDR_27:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: | `-DeclRefExpr [[ADDR_28:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_17]] 'also_after' 'int ({{.*}})' // CHECK-NEXT: `-CallExpr [[ADDR_29:0x[a-z0-9]*]] <col:25, col:37> 'int' // CHECK-NEXT: `-ImplicitCastExpr [[ADDR_30:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay> // CHECK-NEXT: `-DeclRefExpr [[ADDR_31:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_0]] 'also_before' 'int ({{.*}})'
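/*
 * A complementary sketch (not part of the test above; the behavior is
 * stated as expected, not verified here): with a selector that matches the
 * compiling implementation -- vendor(llvm) under clang -- the
 * specialization is expected to win, so use_base() would return 3 rather
 * than 0, the mirror image of the non-matching ibm case exercised above.
 */
int base(void) { return 0; }

#pragma omp begin declare variant match(implementation = {vendor(llvm)})
int base(void) { return 3; }
#pragma omp end declare variant

int use_base(void) {
  return base(); // resolves to the variant when the context selector matches
}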
node_composite_defocus.c
/* * $Id: node_composite_defocus.c 39944 2011-09-05 22:04:30Z gsrb3d $ * * ***** BEGIN GPL LICENSE BLOCK ***** * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) 2006 Blender Foundation. * All rights reserved. * * The Original Code is: all of this file. * * Contributor(s): none yet. * * ***** END GPL LICENSE BLOCK ***** */ /** \file blender/nodes/composite/nodes/node_composite_defocus.c * \ingroup cmpnodes */ #include "node_composite_util.h" /* ************ qdn: Defocus node ****************** */ static bNodeSocketTemplate cmp_node_defocus_in[]= { { SOCK_RGBA, 1, "Image", 0.8f, 0.8f, 0.8f, 1.0f}, { SOCK_FLOAT, 1, "Z", 0.8f, 0.8f, 0.8f, 1.0f, 0.0f, 1.0f, PROP_FACTOR}, { -1, 0, "" } }; static bNodeSocketTemplate cmp_node_defocus_out[]= { { SOCK_RGBA, 0, "Image"}, { -1, 0, "" } }; // line coefs for point sampling & scancon. data. typedef struct BokehCoeffs { float x0, y0, dx, dy; float ls_x, ls_y; float min_x, min_y, max_x, max_y; } BokehCoeffs; // returns array of BokehCoeffs // returns length of array in 'len_bkh', // radius squared of inscribed disk in 'inradsq', needed in getWeight() test, // BKH[8] is the data returned for the bokeh shape & bkh_b[4] is it's 2d bound static void makeBokeh(char bktype, char ro, int* len_bkh, float* inradsq, BokehCoeffs BKH[8], float bkh_b[4]) { float x0, x1, y0, y1, dx, dy, iDxy; float w = MAX2(1e-5f, ro)*M_PI/180.f; // never reported stangely enough, but a zero offset causes missing center line... float wi = (360.f/bktype)*M_PI/180.f; int i, ov, nv; // bktype must be at least 3 & <= 8 bktype = (bktype<3) ? 3 : ((bktype>8) ? 8 : bktype); *len_bkh = bktype; *inradsq = -1.f; for (i=0; i<(*len_bkh); i++) { x0 = cos(w); y0 = sin(w); w += wi; x1 = cos(w); y1 = sin(w); if ((*inradsq)<0.f) { // radius squared of inscribed disk float idx=(x0+x1)*0.5f, idy=(y0+y1)*0.5f; *inradsq = idx*idx + idy*idy; } BKH[i].x0 = x0; BKH[i].y0 = y0; dx = x1-x0, dy = y1-y0; iDxy = 1.f / sqrt(dx*dx + dy*dy); dx *= iDxy; dy *= iDxy; BKH[i].dx = dx; BKH[i].dy = dy; } // precalc scanconversion data // bokeh bound, not transformed, for scanconvert bkh_b[0] = bkh_b[2] = 1e10f; // xmin/ymin bkh_b[1] = bkh_b[3] = -1e10f; // xmax/ymax ov = (*len_bkh) - 1; for (nv=0; nv<(*len_bkh); nv++) { bkh_b[0] = MIN2(bkh_b[0], BKH[nv].x0); // xmin bkh_b[1] = MAX2(bkh_b[1], BKH[nv].x0); // xmax bkh_b[2] = MIN2(bkh_b[2], BKH[nv].y0); // ymin bkh_b[3] = MAX2(bkh_b[3], BKH[nv].y0); // ymax BKH[nv].min_x = MIN2(BKH[ov].x0, BKH[nv].x0); BKH[nv].max_x = MAX2(BKH[ov].x0, BKH[nv].x0); BKH[nv].min_y = MIN2(BKH[ov].y0, BKH[nv].y0); BKH[nv].max_y = MAX2(BKH[ov].y0, BKH[nv].y0); dy = BKH[nv].y0 - BKH[ov].y0; BKH[nv].ls_x = (BKH[nv].x0 - BKH[ov].x0) / ((dy==0.f) ? 1.f : dy); BKH[nv].ls_y = (BKH[nv].ls_x==0.f) ? 
1.f : (1.f/BKH[nv].ls_x); ov = nv; } } // test if u/v inside shape & returns weight value static float getWeight(BokehCoeffs* BKH, int len_bkh, float u, float v, float rad, float inradsq) { BokehCoeffs* bc = BKH; float cdist, irad = (rad==0.f) ? 1.f : (1.f/rad); u *= irad; v *= irad; // early out test1: if point outside outer unit disk, it cannot be inside shape cdist = u*u + v*v; if (cdist>1.f) return 0.f; // early out test2: if point inside or on inner disk, point must be inside shape if (cdist<=inradsq) return 1.f; while (len_bkh--) { if ((bc->dy*(u - bc->x0) - bc->dx*(v - bc->y0)) > 0.f) return 0.f; bc++; } return 1.f; } // QMC.seq. for sampling, A.Keller, EMS static float RI_vdC(unsigned int bits, unsigned int r) { bits = ( bits << 16) | ( bits >> 16); bits = ((bits & 0x00ff00ff) << 8) | ((bits & 0xff00ff00) >> 8); bits = ((bits & 0x0f0f0f0f) << 4) | ((bits & 0xf0f0f0f0) >> 4); bits = ((bits & 0x33333333) << 2) | ((bits & 0xcccccccc) >> 2); bits = ((bits & 0x55555555) << 1) | ((bits & 0xaaaaaaaa) >> 1); bits ^= r; return (float)((double)bits / 4294967296.0); } // single channel IIR gaussian filtering // much faster than anything else, constant time independent of width // should extend to multichannel and make this a node, could be useful static void IIR_gauss_single(CompBuf* buf, float sigma) { double q, q2, sc, cf[4], tsM[9], tsu[3], tsv[3]; float *X, *Y, *W; int i, x, y, sz; // single channel only for now if (buf->type != CB_VAL) return; // <0.5 not valid, though can have a possibly useful sort of sharpening effect if (sigma < 0.5) return; // see "Recursive Gabor Filtering" by Young/VanVliet // all factors here in double.prec. Required, because for single.prec it seems to blow up if sigma > ~200 if (sigma >= 3.556) q = 0.9804*(sigma - 3.556) + 2.5091; else // sigma >= 0.5 q = (0.0561*sigma + 0.5784)*sigma - 0.2568; q2 = q*q; sc = (1.1668 + q)*(3.203729649 + (2.21566 + q)*q); // no gabor filtering here, so no complex multiplies, just the regular coefs. // all negated here, so as not to have to recalc Triggs/Sdika matrix cf[1] = q*(5.788961737 + (6.76492 + 3.0*q)*q)/ sc; cf[2] = -q2*(3.38246 + 3.0*q)/sc; // 0 & 3 unchanged cf[3] = q2*q/sc; cf[0] = 1.0 - cf[1] - cf[2] - cf[3]; // Triggs/Sdika border corrections, // it seems to work, not entirely sure if it is actually totally correct, // Besides J.M.Geusebroek's anigauss.c (see http://www.science.uva.nl/~mark), // found one other implementation by Cristoph Lampert, // but neither seem to be quite the same, result seems to be ok sofar anyway. 
// Extra scale factor here to not have to do it in filter, // though maybe this had something to with the precision errors sc = cf[0]/((1.0 + cf[1] - cf[2] + cf[3])*(1.0 - cf[1] - cf[2] - cf[3])*(1.0 + cf[2] + (cf[1] - cf[3])*cf[3])); tsM[0] = sc*(-cf[3]*cf[1] + 1.0 - cf[3]*cf[3] - cf[2]); tsM[1] = sc*((cf[3] + cf[1])*(cf[2] + cf[3]*cf[1])); tsM[2] = sc*(cf[3]*(cf[1] + cf[3]*cf[2])); tsM[3] = sc*(cf[1] + cf[3]*cf[2]); tsM[4] = sc*(-(cf[2] - 1.0)*(cf[2] + cf[3]*cf[1])); tsM[5] = sc*(-(cf[3]*cf[1] + cf[3]*cf[3] + cf[2] - 1.0)*cf[3]); tsM[6] = sc*(cf[3]*cf[1] + cf[2] + cf[1]*cf[1] - cf[2]*cf[2]); tsM[7] = sc*(cf[1]*cf[2] + cf[3]*cf[2]*cf[2] - cf[1]*cf[3]*cf[3] - cf[3]*cf[3]*cf[3] - cf[3]*cf[2] + cf[3]); tsM[8] = sc*(cf[3]*(cf[1] + cf[3]*cf[2])); #define YVV(L)\ {\ W[0] = cf[0]*X[0] + cf[1]*X[0] + cf[2]*X[0] + cf[3]*X[0];\ W[1] = cf[0]*X[1] + cf[1]*W[0] + cf[2]*X[0] + cf[3]*X[0];\ W[2] = cf[0]*X[2] + cf[1]*W[1] + cf[2]*W[0] + cf[3]*X[0];\ for (i=3; i<L; i++)\ W[i] = cf[0]*X[i] + cf[1]*W[i-1] + cf[2]*W[i-2] + cf[3]*W[i-3];\ tsu[0] = W[L-1] - X[L-1];\ tsu[1] = W[L-2] - X[L-1];\ tsu[2] = W[L-3] - X[L-1];\ tsv[0] = tsM[0]*tsu[0] + tsM[1]*tsu[1] + tsM[2]*tsu[2] + X[L-1];\ tsv[1] = tsM[3]*tsu[0] + tsM[4]*tsu[1] + tsM[5]*tsu[2] + X[L-1];\ tsv[2] = tsM[6]*tsu[0] + tsM[7]*tsu[1] + tsM[8]*tsu[2] + X[L-1];\ Y[L-1] = cf[0]*W[L-1] + cf[1]*tsv[0] + cf[2]*tsv[1] + cf[3]*tsv[2];\ Y[L-2] = cf[0]*W[L-2] + cf[1]*Y[L-1] + cf[2]*tsv[0] + cf[3]*tsv[1];\ Y[L-3] = cf[0]*W[L-3] + cf[1]*Y[L-2] + cf[2]*Y[L-1] + cf[3]*tsv[0];\ for (i=L-4; i>=0; i--)\ Y[i] = cf[0]*W[i] + cf[1]*Y[i+1] + cf[2]*Y[i+2] + cf[3]*Y[i+3];\ } // intermediate buffers sz = MAX2(buf->x, buf->y); Y = MEM_callocN(sz*sizeof(float), "IIR_gauss Y buf"); W = MEM_callocN(sz*sizeof(float), "IIR_gauss W buf"); // H for (y=0; y<buf->y; y++) { X = &buf->rect[y*buf->x]; YVV(buf->x); memcpy(X, Y, sizeof(float)*buf->x); } // V X = MEM_callocN(buf->y*sizeof(float), "IIR_gauss X buf"); for (x=0; x<buf->x; x++) { for (y=0; y<buf->y; y++) X[y] = buf->rect[x + y*buf->x]; YVV(buf->y); for (y=0; y<buf->y; y++) buf->rect[x + y*buf->x] = Y[y]; } MEM_freeN(X); MEM_freeN(W); MEM_freeN(Y); #undef YVV } static void defocus_blur(bNode *node, CompBuf *new, CompBuf *img, CompBuf *zbuf, float inpval, int no_zbuf) { NodeDefocus *nqd = node->storage; CompBuf *wts; // weights buffer CompBuf *crad; // CoC radius buffer BokehCoeffs BKH[8]; // bokeh shape data, here never > 8 pts. float bkh_b[4] = {0}; // shape 2D bound float cam_fdist=1, cam_invfdist=1, cam_lens=35; float dof_sp, maxfgc, bk_hn_theta=0, inradsq=0; int y, len_bkh=0, ydone=0; float aspect, aperture; int minsz; //float bcrad, nmaxc, scf; // get some required params from the current scene camera // (ton) this is wrong, needs fixed Scene *scene= (Scene*)node->id; Object* camob = (scene)? scene->camera: NULL; if (camob && camob->type==OB_CAMERA) { Camera* cam = (Camera*)camob->data; cam_lens = cam->lens; cam_fdist = dof_camera(camob); if (cam_fdist==0.0) cam_fdist = 1e10f; /* if the dof is 0.0 then set it be be far away */ cam_invfdist = 1.f/cam_fdist; } // guess work here.. best match with raytraced result minsz = MIN2(img->x, img->y); dof_sp = (float)minsz / (16.f / cam_lens); // <- == aspect * MIN2(img->x, img->y) / tan(0.5f * fov); // aperture aspect = (img->x > img->y) ? 
(img->y / (float)img->x) : (img->x / (float)img->y); aperture = 0.5f*(cam_lens / (aspect*32.f)) / nqd->fstop; // if not disk, make bokeh coefficients and other needed data if (nqd->bktype!=0) { makeBokeh(nqd->bktype, nqd->rotation, &len_bkh, &inradsq, BKH, bkh_b); bk_hn_theta = 0.5 * nqd->bktype * sin(2.0 * M_PI / nqd->bktype); // weight factor } // accumulated weights wts = alloc_compbuf(img->x, img->y, CB_VAL, 1); // CoC radius buffer crad = alloc_compbuf(img->x, img->y, CB_VAL, 1); // if 'no_zbuf' flag set (which is always set if input is not an image), // values are instead interpreted directly as blur radius values if (no_zbuf) { // to prevent *reaaallly* big radius values and impossible calculation times, // limit the maximum to half the image width or height, whichever is smaller float maxr = 0.5f*(float)MIN2(img->x, img->y); unsigned int p; for (p=0; p<(unsigned int)(img->x*img->y); p++) { crad->rect[p] = zbuf ? (zbuf->rect[p]*nqd->scale) : inpval; // bug #5921, limit minimum crad->rect[p] = MAX2(1e-5f, crad->rect[p]); crad->rect[p] = MIN2(crad->rect[p], maxr); // if maxblur!=0, limit maximum if (nqd->maxblur != 0.f) crad->rect[p] = MIN2(crad->rect[p], nqd->maxblur); } } else { float wt; // actual zbuffer. // separate foreground from background CoC's // then blur background and blend in again with foreground, // improves the 'blurred foreground overlapping in-focus midground' sharp boundary problem. // wts buffer here used for blendmask maxfgc = 0.f; // maximum foreground CoC radius for (y=0; y<img->y; y++) { unsigned int p = y * img->x; int x; for (x=0; x<img->x; x++) { unsigned int px = p + x; float iZ = (zbuf->rect[px]==0.f) ? 0.f : (1.f/zbuf->rect[px]); crad->rect[px] = 0.5f*(aperture*(dof_sp*(cam_invfdist - iZ) - 1.f)); if (crad->rect[px] <= 0.f) { wts->rect[px] = 1.f; crad->rect[px] = -crad->rect[px]; if (crad->rect[px] > maxfgc) maxfgc = crad->rect[px]; } else crad->rect[px] = wts->rect[px] = 0; } } // fast blur... // bug #6656 part 1, probably when previous node_composite.c was split into separate files, it was not properly updated // to include recent cvs commits (well, at least not defocus node), so this part was missing... wt = aperture*128.f; IIR_gauss_single(crad, wt); IIR_gauss_single(wts, wt); // bug #6656 part 2a, although foreground blur is not based anymore on closest object, // the rescaling op below was still based on that anyway, and unlike the comment in below code, // the difference is therefore not always that small at all... // so for now commented out, not sure if this is going to cause other future problems, lets just wait and see... /* // find new maximum to scale it back to original // (could skip this, not strictly necessary, in general, difference is quite small, but just in case...) nmaxc = 0; for (p=0; p<(img->x*img->y); p++) if (crad->rect[p] > nmaxc) nmaxc = crad->rect[p]; // rescale factor scf = (nmaxc==0.f) ? 1.f: (maxfgc / nmaxc); */ // and blend... for (y=0; y<img->y; y++) { unsigned int p = y*img->x; int x; for (x=0; x<img->x; x++) { unsigned px = p + x; if (zbuf->rect[px]!=0.f) { float iZ = (zbuf->rect[px]==0.f) ? 
0.f : (1.f/zbuf->rect[px]); // bug #6656 part 2b, do not rescale /* bcrad = 0.5f*fabs(aperture*(dof_sp*(cam_invfdist - iZ) - 1.f)); // scale crad back to original maximum and blend crad->rect[px] = bcrad + wts->rect[px]*(scf*crad->rect[px] - bcrad); */ crad->rect[px] = 0.5f*fabs(aperture*(dof_sp*(cam_invfdist - iZ) - 1.f)); // 'bug' #6615, limit minimum radius to 1 pixel, not really a solution, but somewhat mitigates the problem crad->rect[px] = MAX2(crad->rect[px], 0.5f); // if maxblur!=0, limit maximum if (nqd->maxblur != 0.f) crad->rect[px] = MIN2(crad->rect[px], nqd->maxblur); } else crad->rect[px] = 0.f; // clear weights for next part wts->rect[px] = 0.f; } // esc set by main calling process if(node->exec & NODE_BREAK) break; } } //------------------------------------------------------------------ // main loop #ifndef __APPLE__ /* can crash on Mac, see bug #22856, disabled for now */ #ifdef __INTEL_COMPILER /* icc doesn't like the compound statement -- internal error: 0_1506 */ #pragma omp parallel for private(y) if(!nqd->preview) schedule(guided) #else #pragma omp parallel for private(y) if(!nqd->preview && img->y*img->x > 16384) schedule(guided) #endif #endif for (y=0; y<img->y; y++) { unsigned int p, p4, zp, cp, cp4; float *ctcol, u, v, ct_crad, cR2=0; int x, sx, sy; // some sort of visual feedback would be nice, or at least this text in the renderwin header // but for now just print some info in the console every 8 scanlines. #pragma omp critical { if (((ydone & 7)==0) || (ydone==(img->y-1))) { if(G.background==0) { printf("\rdefocus: Processing Line %d of %d ... ", ydone+1, img->y); fflush(stdout); } } ydone++; } // esc set by main calling process. don't break because openmp doesn't // allow it, just continue and do nothing if(node->exec & NODE_BREAK) continue; zp = y * img->x; for (x=0; x<img->x; x++) { cp = zp + x; cp4 = cp * img->type; // Circle of Confusion radius for current pixel cR2 = ct_crad = crad->rect[cp]; // skip if zero (border render) if (ct_crad==0.f) { // related to bug #5921, forgot output image when skipping 0 radius values new->rect[cp4] = img->rect[cp4]; if (new->type != CB_VAL) { new->rect[cp4+1] = img->rect[cp4+1]; new->rect[cp4+2] = img->rect[cp4+2]; new->rect[cp4+3] = img->rect[cp4+3]; } continue; } cR2 *= cR2; // pixel color ctcol = &img->rect[cp4]; if (!nqd->preview) { int xs, xe, ys, ye; float lwt, wtcol[4] = {0}, aacol[4] = {0}; float wt; // shape weight if (nqd->bktype==0) // disk wt = 1.f/((float)M_PI*cR2); else wt = 1.f/(cR2*bk_hn_theta); // weighted color wtcol[0] = wt*ctcol[0]; if (new->type != CB_VAL) { wtcol[1] = wt*ctcol[1]; wtcol[2] = wt*ctcol[2]; wtcol[3] = wt*ctcol[3]; } // macro for background blur overlap test // unfortunately, since this is done per pixel, // it has a very significant negative impact on processing time... // (eg. aa disk blur without test: 112 sec, vs with test: 176 sec...) 
// iff center blur radius > threshold // and if overlap pixel in focus, do nothing, else add color/weigbt // (threshold constant is dependant on amount of blur) #define TESTBG1(c, w) {\ if (ct_crad > nqd->bthresh) {\ if (crad->rect[p] > nqd->bthresh) {\ new->rect[p] += c[0];\ wts->rect[p] += w;\ }\ }\ else {\ new->rect[p] += c[0];\ wts->rect[p] += w;\ }\ } #define TESTBG4(c, w) {\ if (ct_crad > nqd->bthresh) {\ if (crad->rect[p] > nqd->bthresh) {\ new->rect[p4] += c[0];\ new->rect[p4+1] += c[1];\ new->rect[p4+2] += c[2];\ new->rect[p4+3] += c[3];\ wts->rect[p] += w;\ }\ }\ else {\ new->rect[p4] += c[0];\ new->rect[p4+1] += c[1];\ new->rect[p4+2] += c[2];\ new->rect[p4+3] += c[3];\ wts->rect[p] += w;\ }\ } if (nqd->bktype == 0) { // Disk int _x, i, j, di; float Dj, T; // AA pixel #define AAPIX(a, b) {\ int _ny = b;\ if ((_ny >= 0) && (_ny < new->y)) {\ int _nx = a;\ if ((_nx >=0) && (_nx < new->x)) {\ p = _ny*new->x + _nx;\ if (new->type==CB_VAL) {\ TESTBG1(aacol, lwt);\ }\ else {\ p4 = p * new->type;\ TESTBG4(aacol, lwt);\ }\ }\ }\ } // circle scanline #define CSCAN(a, b) {\ int _ny = y + b;\ if ((_ny >= 0) && (_ny < new->y)) {\ xs = x - a + 1;\ if (xs < 0) xs = 0;\ xe = x + a;\ if (xe > new->x) xe = new->x;\ p = _ny*new->x + xs;\ if (new->type==CB_VAL) {\ for (_x=xs; _x<xe; _x++, p++) TESTBG1(wtcol, wt);\ }\ else {\ p4 = p * new->type;\ for (_x=xs; _x<xe; _x++, p++, p4+=new->type) TESTBG4(wtcol, wt);\ }\ }\ } i = ceil(ct_crad); j = 0; T = 0; while (i > j) { Dj = sqrt(cR2 - j*j); Dj -= floor(Dj); di = 0; if (Dj > T) { i--; di = 1; } T = Dj; aacol[0] = wtcol[0]*Dj; if (new->type != CB_VAL) { aacol[1] = wtcol[1]*Dj; aacol[2] = wtcol[2]*Dj; aacol[3] = wtcol[3]*Dj; } lwt = wt*Dj; if (i!=j) { // outer pixels AAPIX(x+j, y+i); AAPIX(x+j, y-i); if (j) { AAPIX(x-j, y+i); // BL AAPIX(x-j, y-i); // TL } if (di) { // only when i changed, interior of outer section CSCAN(j, i); // bottom CSCAN(j, -i); // top } } // lower mid section AAPIX(x+i, y+j); if (i) AAPIX(x-i, y+j); CSCAN(i, j); // upper mid section if (j) { AAPIX(x+i, y-j); if (i) AAPIX(x-i, y-j); CSCAN(i, -j); } j++; } #undef CSCAN #undef AAPIX } else { // n-agonal int ov, nv; float mind, maxd, lwt; ys = MAX2((int)floor(bkh_b[2]*ct_crad + y), 0); ye = MIN2((int)ceil(bkh_b[3]*ct_crad + y), new->y - 1); for (sy=ys; sy<=ye; sy++) { float fxs = 1e10f, fxe = -1e10f; float yf = (sy - y)/ct_crad; int found = 0; ov = len_bkh - 1; mind = maxd = 0; for (nv=0; nv<len_bkh; nv++) { if ((BKH[nv].max_y >= yf) && (BKH[nv].min_y <= yf)) { float tx = BKH[ov].x0 + BKH[nv].ls_x*(yf - BKH[ov].y0); if (tx < fxs) { fxs = tx; mind = BKH[nv].ls_x; } if (tx > fxe) { fxe = tx; maxd = BKH[nv].ls_x; } if (++found == 2) break; } ov = nv; } if (found) { fxs = fxs*ct_crad + x; fxe = fxe*ct_crad + x; xs = (int)floor(fxs), xe = (int)ceil(fxe); // AA hack for first and last x pixel, near vertical edges only if (fabs(mind) <= 1.f) { if ((xs >= 0) && (xs < new->x)) { lwt = 1.f-(fxs - xs); aacol[0] = wtcol[0]*lwt; p = xs + sy*new->x; if (new->type==CB_VAL) { lwt *= wt; TESTBG1(aacol, lwt); } else { p4 = p * new->type; aacol[1] = wtcol[1]*lwt; aacol[2] = wtcol[2]*lwt; aacol[3] = wtcol[3]*lwt; lwt *= wt; TESTBG4(aacol, lwt); } } } if (fabs(maxd) <= 1.f) { if ((xe >= 0) && (xe < new->x)) { lwt = 1.f-(xe - fxe); aacol[0] = wtcol[0]*lwt; p = xe + sy*new->x; if (new->type==CB_VAL) { lwt *= wt; TESTBG1(aacol, lwt); } else { p4 = p * new->type; aacol[1] = wtcol[1]*lwt; aacol[2] = wtcol[2]*lwt; aacol[3] = wtcol[3]*lwt; lwt *= wt; TESTBG4(aacol, lwt); } } } xs = MAX2(xs+1, 0); xe = MIN2(xe, 
new->x); // remaining interior scanline p = sy*new->x + xs; if (new->type==CB_VAL) { for (sx=xs; sx<xe; sx++, p++) TESTBG1(wtcol, wt); } else { p4 = p * new->type; for (sx=xs; sx<xe; sx++, p++, p4+=new->type) TESTBG4(wtcol, wt); } } } // now traverse in opposite direction, y scanlines, // but this time only draw the near horizontal edges, // applying same AA hack as above xs = MAX2((int)floor(bkh_b[0]*ct_crad + x), 0); xe = MIN2((int)ceil(bkh_b[1]*ct_crad + x), img->x - 1); for (sx=xs; sx<=xe; sx++) { float xf = (sx - x)/ct_crad; float fys = 1e10f, fye = -1e10f; int found = 0; ov = len_bkh - 1; mind = maxd = 0; for (nv=0; nv<len_bkh; nv++) { if ((BKH[nv].max_x >= xf) && (BKH[nv].min_x <= xf)) { float ty = BKH[ov].y0 + BKH[nv].ls_y*(xf - BKH[ov].x0); if (ty < fys) { fys = ty; mind = BKH[nv].ls_y; } if (ty > fye) { fye = ty; maxd = BKH[nv].ls_y; } if (++found == 2) break; } ov = nv; } if (found) { fys = fys*ct_crad + y; fye = fye*ct_crad + y; // near horizontal edges only, line slope <= 1 if (fabs(mind) <= 1.f) { int iys = (int)floor(fys); if ((iys >= 0) && (iys < new->y)) { lwt = 1.f - (fys - iys); aacol[0] = wtcol[0]*lwt; p = sx + iys*new->x; if (new->type==CB_VAL) { lwt *= wt; TESTBG1(aacol, lwt); } else { p4 = p * new->type; aacol[1] = wtcol[1]*lwt; aacol[2] = wtcol[2]*lwt; aacol[3] = wtcol[3]*lwt; lwt *= wt; TESTBG4(aacol, lwt); } } } if (fabs(maxd) <= 1.f) { int iye = ceil(fye); if ((iye >= 0) && (iye < new->y)) { lwt = 1.f - (iye - fye); aacol[0] = wtcol[0]*lwt; p = sx + iye*new->x; if (new->type==CB_VAL) { lwt *= wt; TESTBG1(aacol, lwt); } else { p4 = p * new->type; aacol[1] = wtcol[1]*lwt; aacol[2] = wtcol[2]*lwt; aacol[3] = wtcol[3]*lwt; lwt *= wt; TESTBG4(aacol, lwt); } } } } } } #undef TESTBG4 #undef TESTBG1 } else { // sampled, simple rejection sampling here, good enough unsigned int maxsam, s, ui = BLI_rand()*BLI_rand(); float wcor, cpr = BLI_frand(), lwt; if (no_zbuf) maxsam = nqd->samples; // no zbuffer input, use sample value directly else { // depth adaptive sampling hack, the more out of focus, the more samples taken, 16 minimum. maxsam = (int)(0.5f + nqd->samples*(1.f-(float)exp(-fabs(zbuf->rect[cp] - cam_fdist)))); if (maxsam < 16) maxsam = 16; } wcor = 1.f/(float)maxsam; for (s=0; s<maxsam; ++s) { u = ct_crad*(2.f*RI_vdC(s, ui) - 1.f); v = ct_crad*(2.f*(s + cpr)/(float)maxsam - 1.f); sx = (int)(x + u + 0.5f), sy = (int)(y + v + 0.5f); if ((sx<0) || (sx >= new->x) || (sy<0) || (sy >= new->y)) continue; p = sx + sy*new->x; p4 = p * new->type; if (nqd->bktype==0) // Disk lwt = ((u*u + v*v)<=cR2) ? wcor : 0.f; else // AA not needed here lwt = wcor * getWeight(BKH, len_bkh, u, v, ct_crad, inradsq); // prevent background bleeding onto in-focus pixels, user-option if (ct_crad > nqd->bthresh) { // if center blur > threshold if (crad->rect[p] > nqd->bthresh) { // if overlap pixel in focus, do nothing, else add color/weigbt new->rect[p4] += ctcol[0] * lwt; if (new->type != CB_VAL) { new->rect[p4+1] += ctcol[1] * lwt; new->rect[p4+2] += ctcol[2] * lwt; new->rect[p4+3] += ctcol[3] * lwt; } wts->rect[p] += lwt; } } else { new->rect[p4] += ctcol[0] * lwt; if (new->type != CB_VAL) { new->rect[p4+1] += ctcol[1] * lwt; new->rect[p4+2] += ctcol[2] * lwt; new->rect[p4+3] += ctcol[3] * lwt; } wts->rect[p] += lwt; } } } } } // finally, normalize for (y=0; y<new->y; y++) { unsigned int p = y * new->x; unsigned int p4 = p * new->type; int x; for (x=0; x<new->x; x++) { float dv = (wts->rect[p]==0.f) ? 
1.f : (1.f/wts->rect[p]); new->rect[p4] *= dv; if (new->type!=CB_VAL) { new->rect[p4+1] *= dv; new->rect[p4+2] *= dv; new->rect[p4+3] *= dv; } p++; p4 += new->type; } } free_compbuf(crad); free_compbuf(wts); printf("Done\n"); } static void node_composit_exec_defocus(void *UNUSED(data), bNode *node, bNodeStack **in, bNodeStack **out) { CompBuf *new, *old, *zbuf_use = NULL, *img = in[0]->data, *zbuf = in[1]->data; NodeDefocus *nqd = node->storage; int no_zbuf = nqd->no_zbuf; if ((img==NULL) || (out[0]->hasoutput==0)) return; // if image not valid type or fstop==infinite (128), nothing to do, pass in to out if (((img->type!=CB_RGBA) && (img->type!=CB_VAL)) || ((no_zbuf==0) && (nqd->fstop==128.f))) { out[0]->data = pass_on_compbuf(img); return; } if (zbuf!=NULL) { // Zbuf input, check to make sure, single channel, same size // doesn't have to be actual zbuffer, but must be value type if ((zbuf->x != img->x) || (zbuf->y != img->y)) { // could do a scale here instead... printf("Z input must be same size as image !\n"); return; } zbuf_use = typecheck_compbuf(zbuf, CB_VAL); } else no_zbuf = 1; // no zbuffer input // ok, process old = img; if (nqd->gamco) { // gamma correct, blender func is simplified, fixed value & RGBA only, // should make user param. also depremul and premul afterwards, gamma // correction can't work with premul alpha old = dupalloc_compbuf(img); premul_compbuf(old, 1); gamma_correct_compbuf(old, 0); premul_compbuf(old, 0); } new = alloc_compbuf(old->x, old->y, old->type, 1); defocus_blur(node, new, old, zbuf_use, in[1]->vec[0]*nqd->scale, no_zbuf); if (nqd->gamco) { premul_compbuf(new, 1); gamma_correct_compbuf(new, 1); premul_compbuf(new, 0); free_compbuf(old); } if(node->exec & NODE_BREAK) { free_compbuf(new); new= NULL; } out[0]->data = new; if (zbuf_use && (zbuf_use != zbuf)) free_compbuf(zbuf_use); } static void node_composit_init_defocus(bNodeTree *UNUSED(ntree), bNode* node, bNodeTemplate *UNUSED(ntemp)) { /* qdn: defocus node */ NodeDefocus *nbd = MEM_callocN(sizeof(NodeDefocus), "node defocus data"); nbd->bktype = 0; nbd->rotation = 0.f; nbd->preview = 1; nbd->gamco = 0; nbd->samples = 16; nbd->fstop = 128.f; nbd->maxblur = 0; nbd->bthresh = 1.f; nbd->scale = 1.f; nbd->no_zbuf = 1; node->storage = nbd; } void register_node_type_cmp_defocus(ListBase *lb) { static bNodeType ntype; node_type_base(&ntype, CMP_NODE_DEFOCUS, "Defocus", NODE_CLASS_OP_FILTER, NODE_OPTIONS); node_type_socket_templates(&ntype, cmp_node_defocus_in, cmp_node_defocus_out); node_type_size(&ntype, 150, 120, 200); node_type_init(&ntype, node_composit_init_defocus); node_type_storage(&ntype, "NodeDefocus", node_free_standard_storage, node_copy_standard_storage); node_type_exec(&ntype, node_composit_exec_defocus); nodeRegisterType(lb, &ntype); }
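/*
 * A standalone sketch (not Blender code; camera values are illustrative) of
 * the per-pixel circle-of-confusion radius computed in defocus_blur() above.
 * It reproduces only the crad formula and its sign convention: crad <= 0 is
 * the foreground branch (radius negated, later blended via the wts mask),
 * crad > 0 is a background CoC radius in pixels.
 */
#include <stdio.h>

int main(void)
{
	const float cam_lens = 35.f;                    /* focal length (mm) */
	const float fstop = 2.8f;
	const float aspect = 540.f / 960.f;             /* img->y/img->x for 960x540 */
	const float aperture = 0.5f*(cam_lens / (aspect*32.f)) / fstop;
	const float cam_invfdist = 1.f/5.f;             /* focus distance of 5 units */
	const float dof_sp = 540.f / (16.f / cam_lens); /* minsz / (16/lens) */
	float Z;

	for (Z = 1.f; Z <= 9.f; Z += 2.f) {
		float iZ = 1.f/Z;
		float crad = 0.5f*(aperture*(dof_sp*(cam_invfdist - iZ) - 1.f));
		printf("Z=%g -> crad=%g (%s)\n", Z, crad,
		       (crad <= 0.f) ? "foreground" : "background");
	}
	return 0;
}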
convolution_2x2_pack8_fp16.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv2x2s1_weight_fp16_pack8_avx(const Mat& kernel, Mat& kernel_tm_pack8, int num_input, int num_output) { // src = kw-kh-inch-outch // dst = 8b-8a-kw-kh-inch/8a-outch/8b Mat weight_data_r2 = kernel.reshape(4, num_input, num_output); kernel_tm_pack8.create(4, num_input / 8, num_output / 8, (size_t)2 * 64, 64); for (int q = 0; q + 7 < num_output; q += 8) { const Mat k0 = weight_data_r2.channel(q); const Mat k1 = weight_data_r2.channel(q + 1); const Mat k2 = weight_data_r2.channel(q + 2); const Mat k3 = weight_data_r2.channel(q + 3); const Mat k4 = weight_data_r2.channel(q + 4); const Mat k5 = weight_data_r2.channel(q + 5); const Mat k6 = weight_data_r2.channel(q + 6); const Mat k7 = weight_data_r2.channel(q + 7); Mat g0 = kernel_tm_pack8.channel(q / 8); for (int p = 0; p + 7 < num_input; p += 8) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k04 = k0.row(p + 4); const float* k05 = k0.row(p + 5); const float* k06 = k0.row(p + 6); const float* k07 = k0.row(p + 7); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k14 = k1.row(p + 4); const float* k15 = k1.row(p + 5); const float* k16 = k1.row(p + 6); const float* k17 = k1.row(p + 7); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k24 = k2.row(p + 4); const float* k25 = k2.row(p + 5); const float* k26 = k2.row(p + 6); const float* k27 = k2.row(p + 7); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k34 = k3.row(p + 4); const float* k35 = k3.row(p + 5); const float* k36 = k3.row(p + 6); const float* k37 = k3.row(p + 7); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k44 = k4.row(p + 4); const float* k45 = k4.row(p + 5); const float* k46 = k4.row(p + 6); const float* k47 = k4.row(p + 7); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k54 = k5.row(p + 4); const float* k55 = k5.row(p + 5); const float* k56 = k5.row(p + 6); const float* k57 = k5.row(p + 7); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k64 = k6.row(p + 4); const float* k65 = k6.row(p + 5); const float* k66 = k6.row(p + 6); const float* k67 = k6.row(p + 7); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); const float* k74 
= k7.row(p + 4); const float* k75 = k7.row(p + 5); const float* k76 = k7.row(p + 6); const float* k77 = k7.row(p + 7); unsigned short* g00 = (unsigned short*)g0.row(p / 8); for (int k = 0; k < 4; k++) { g00[0] = float32_to_float16(k00[k]); g00[1] = float32_to_float16(k10[k]); g00[2] = float32_to_float16(k20[k]); g00[3] = float32_to_float16(k30[k]); g00[4] = float32_to_float16(k40[k]); g00[5] = float32_to_float16(k50[k]); g00[6] = float32_to_float16(k60[k]); g00[7] = float32_to_float16(k70[k]); g00 += 8; g00[0] = float32_to_float16(k01[k]); g00[1] = float32_to_float16(k11[k]); g00[2] = float32_to_float16(k21[k]); g00[3] = float32_to_float16(k31[k]); g00[4] = float32_to_float16(k41[k]); g00[5] = float32_to_float16(k51[k]); g00[6] = float32_to_float16(k61[k]); g00[7] = float32_to_float16(k71[k]); g00 += 8; g00[0] = float32_to_float16(k02[k]); g00[1] = float32_to_float16(k12[k]); g00[2] = float32_to_float16(k22[k]); g00[3] = float32_to_float16(k32[k]); g00[4] = float32_to_float16(k42[k]); g00[5] = float32_to_float16(k52[k]); g00[6] = float32_to_float16(k62[k]); g00[7] = float32_to_float16(k72[k]); g00 += 8; g00[0] = float32_to_float16(k03[k]); g00[1] = float32_to_float16(k13[k]); g00[2] = float32_to_float16(k23[k]); g00[3] = float32_to_float16(k33[k]); g00[4] = float32_to_float16(k43[k]); g00[5] = float32_to_float16(k53[k]); g00[6] = float32_to_float16(k63[k]); g00[7] = float32_to_float16(k73[k]); g00 += 8; g00[0] = float32_to_float16(k04[k]); g00[1] = float32_to_float16(k14[k]); g00[2] = float32_to_float16(k24[k]); g00[3] = float32_to_float16(k34[k]); g00[4] = float32_to_float16(k44[k]); g00[5] = float32_to_float16(k54[k]); g00[6] = float32_to_float16(k64[k]); g00[7] = float32_to_float16(k74[k]); g00 += 8; g00[0] = float32_to_float16(k05[k]); g00[1] = float32_to_float16(k15[k]); g00[2] = float32_to_float16(k25[k]); g00[3] = float32_to_float16(k35[k]); g00[4] = float32_to_float16(k45[k]); g00[5] = float32_to_float16(k55[k]); g00[6] = float32_to_float16(k65[k]); g00[7] = float32_to_float16(k75[k]); g00 += 8; g00[0] = float32_to_float16(k06[k]); g00[1] = float32_to_float16(k16[k]); g00[2] = float32_to_float16(k26[k]); g00[3] = float32_to_float16(k36[k]); g00[4] = float32_to_float16(k46[k]); g00[5] = float32_to_float16(k56[k]); g00[6] = float32_to_float16(k66[k]); g00[7] = float32_to_float16(k76[k]); g00 += 8; g00[0] = float32_to_float16(k07[k]); g00[1] = float32_to_float16(k17[k]); g00[2] = float32_to_float16(k27[k]); g00[3] = float32_to_float16(k37[k]); g00[4] = float32_to_float16(k47[k]); g00[5] = float32_to_float16(k57[k]); g00[6] = float32_to_float16(k67[k]); g00[7] = float32_to_float16(k77[k]); g00 += 8; } } } } static void conv2x2s1_fp16_pack8_avx(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); __m256 _bias0 = bias ? 
_mm256_loadu_ps((const float*)bias + p * 8) : _mm256_set1_ps(0.f); out0.fill(_bias0); for (int q = 0; q < inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const unsigned short* kptr = (const unsigned short*)kernel.channel(p).row(q); // const float* kptr = (const float*)kernel + 4 * inch * p * 64; int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(outptr0); __m256 _sum1 = _mm256_loadu_ps(outptr0 + 8); __m256 _r00 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 1); __m256 _r02 = _mm256_broadcast_ss(r0 + 2); __m256 _r03 = _mm256_broadcast_ss(r0 + 3); __m256 _r04 = _mm256_broadcast_ss(r0 + 4); __m256 _r05 = _mm256_broadcast_ss(r0 + 5); __m256 _r06 = _mm256_broadcast_ss(r0 + 6); __m256 _r07 = _mm256_broadcast_ss(r0 + 7); r0 += 8; __m256 _k00 = loadfp16(kptr); __m256 _k01 = loadfp16(kptr + 8); __m256 _k02 = loadfp16(kptr + 16); __m256 _k03 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0); __m256 _k04 = loadfp16(kptr); __m256 _k05 = loadfp16(kptr + 8); __m256 _k06 = loadfp16(kptr + 16); __m256 _k07 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0); //======================================== _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); r0 += 8; _sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k06, _r06, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1); _k00 = loadfp16(kptr); _k01 = loadfp16(kptr + 8); _k02 = loadfp16(kptr + 16); _k03 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k00, _r00, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k01, _r01, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k02, _r02, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k03, _r03, _sum0); _k04 = loadfp16(kptr); _k05 = loadfp16(kptr + 8); _k06 = loadfp16(kptr + 16); _k07 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k04, _r04, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k05, _r05, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k06, _r06, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k07, _r07, _sum0); _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); _sum1 = _mm256_comp_fmadd_ps(_k00, _r00, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k01, _r01, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k02, _r02, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k03, _r03, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k04, _r04, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k05, _r05, _sum1); _sum1 = 
_mm256_comp_fmadd_ps(_k06, _r06, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k07, _r07, _sum1); //=============== __m256 _r10 = _mm256_broadcast_ss(r1); __m256 _r11 = _mm256_broadcast_ss(r1 + 1); __m256 _r12 = _mm256_broadcast_ss(r1 + 2); __m256 _r13 = _mm256_broadcast_ss(r1 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 7); __m256 _k10 = loadfp16(kptr); __m256 _k11 = loadfp16(kptr + 8); __m256 _k12 = loadfp16(kptr + 16); __m256 _k13 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0); __m256 _k14 = loadfp16(kptr); __m256 _k15 = loadfp16(kptr + 8); __m256 _k16 = loadfp16(kptr + 16); __m256 _k17 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0); //======================================= r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1); _k10 = loadfp16(kptr); _k11 = loadfp16(kptr + 8); _k12 = loadfp16(kptr + 16); _k13 = loadfp16(kptr + 24); kptr += 32; _sum0 = _mm256_comp_fmadd_ps(_k10, _r10, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k11, _r11, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k12, _r12, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k13, _r13, _sum0); _k14 = loadfp16(kptr); _k15 = loadfp16(kptr + 8); _k16 = loadfp16(kptr + 16); _k17 = loadfp16(kptr + 24); _sum0 = _mm256_comp_fmadd_ps(_k14, _r14, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k15, _r15, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k16, _r16, _sum0); _sum0 = _mm256_comp_fmadd_ps(_k17, _r17, _sum0); r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _sum1 = _mm256_comp_fmadd_ps(_k10, _r10, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k11, _r11, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k12, _r12, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k13, _r13, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k14, _r14, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k15, _r15, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k16, _r16, _sum1); _sum1 = _mm256_comp_fmadd_ps(_k17, _r17, _sum1); kptr -= 224; _mm256_storeu_ps(outptr0, _sum0); _mm256_storeu_ps(outptr0 + 8, _sum1); outptr0 += 16; } for (; j < outw; j++) { __m256 _sum = _mm256_loadu_ps(outptr0); __m256 _r00 = _mm256_broadcast_ss(r0); __m256 _r01 = _mm256_broadcast_ss(r0 + 1); __m256 _r02 = _mm256_broadcast_ss(r0 + 2); __m256 _r03 = _mm256_broadcast_ss(r0 + 3); __m256 _r04 = _mm256_broadcast_ss(r0 + 4); __m256 _r05 = 
_mm256_broadcast_ss(r0 + 5); __m256 _r06 = _mm256_broadcast_ss(r0 + 6); __m256 _r07 = _mm256_broadcast_ss(r0 + 7); __m256 _k00 = loadfp16(kptr); __m256 _k01 = loadfp16(kptr + 8); __m256 _k02 = loadfp16(kptr + 16); __m256 _k03 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum); _sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum); _sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum); _sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum); __m256 _k04 = loadfp16(kptr); __m256 _k05 = loadfp16(kptr + 8); __m256 _k06 = loadfp16(kptr + 16); __m256 _k07 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); _sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum); _sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum); _sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum); //======================================== r0 += 8; _r00 = _mm256_broadcast_ss(r0); _r01 = _mm256_broadcast_ss(r0 + 1); _r02 = _mm256_broadcast_ss(r0 + 2); _r03 = _mm256_broadcast_ss(r0 + 3); _r04 = _mm256_broadcast_ss(r0 + 4); _r05 = _mm256_broadcast_ss(r0 + 5); _r06 = _mm256_broadcast_ss(r0 + 6); _r07 = _mm256_broadcast_ss(r0 + 7); _k00 = loadfp16(kptr); _k01 = loadfp16(kptr + 8); _k02 = loadfp16(kptr + 16); _k03 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k00, _r00, _sum); _sum = _mm256_comp_fmadd_ps(_k01, _r01, _sum); _sum = _mm256_comp_fmadd_ps(_k02, _r02, _sum); _sum = _mm256_comp_fmadd_ps(_k03, _r03, _sum); _k04 = loadfp16(kptr); _k05 = loadfp16(kptr + 8); _k06 = loadfp16(kptr + 16); _k07 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k04, _r04, _sum); _sum = _mm256_comp_fmadd_ps(_k05, _r05, _sum); _sum = _mm256_comp_fmadd_ps(_k06, _r06, _sum); _sum = _mm256_comp_fmadd_ps(_k07, _r07, _sum); //=============== __m256 _r10 = _mm256_broadcast_ss(r1); __m256 _r11 = _mm256_broadcast_ss(r1 + 1); __m256 _r12 = _mm256_broadcast_ss(r1 + 2); __m256 _r13 = _mm256_broadcast_ss(r1 + 3); __m256 _r14 = _mm256_broadcast_ss(r1 + 4); __m256 _r15 = _mm256_broadcast_ss(r1 + 5); __m256 _r16 = _mm256_broadcast_ss(r1 + 6); __m256 _r17 = _mm256_broadcast_ss(r1 + 7); __m256 _k10 = loadfp16(kptr); __m256 _k11 = loadfp16(kptr + 8); __m256 _k12 = loadfp16(kptr + 16); __m256 _k13 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum); _sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum); _sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum); _sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum); __m256 _k14 = loadfp16(kptr); __m256 _k15 = loadfp16(kptr + 8); __m256 _k16 = loadfp16(kptr + 16); __m256 _k17 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum); _sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum); _sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum); _sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum); //======================================= r1 += 8; _r10 = _mm256_broadcast_ss(r1); _r11 = _mm256_broadcast_ss(r1 + 1); _r12 = _mm256_broadcast_ss(r1 + 2); _r13 = _mm256_broadcast_ss(r1 + 3); _r14 = _mm256_broadcast_ss(r1 + 4); _r15 = _mm256_broadcast_ss(r1 + 5); _r16 = _mm256_broadcast_ss(r1 + 6); _r17 = _mm256_broadcast_ss(r1 + 7); _k10 = loadfp16(kptr); _k11 = loadfp16(kptr + 8); _k12 = loadfp16(kptr + 16); _k13 = loadfp16(kptr + 24); kptr += 32; _sum = _mm256_comp_fmadd_ps(_k10, _r10, _sum); _sum = _mm256_comp_fmadd_ps(_k11, _r11, _sum); _sum = _mm256_comp_fmadd_ps(_k12, _r12, _sum); _sum = _mm256_comp_fmadd_ps(_k13, _r13, _sum); _k14 = loadfp16(kptr); _k15 = loadfp16(kptr + 8); _k16 = loadfp16(kptr + 16); _k17 = loadfp16(kptr + 24); _sum = _mm256_comp_fmadd_ps(_k14, _r14, _sum); 
_sum = _mm256_comp_fmadd_ps(_k15, _r15, _sum); _sum = _mm256_comp_fmadd_ps(_k16, _r16, _sum); _sum = _mm256_comp_fmadd_ps(_k17, _r17, _sum); kptr -= 224; _mm256_storeu_ps(outptr0, _sum); outptr0 += 8; } r0 += 8; r1 += 8; } } } }
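/* Note: the kernels above lean on two small helpers defined elsewhere in ncnn:
 * loadfp16(), which widens 8 packed fp16 weights into a __m256, and
 * _mm256_comp_fmadd_ps(), a compatibility FMA. A minimal sketch of plausible
 * implementations, assuming F16C support; the real ncnn helpers may differ: */
#include <immintrin.h>

static inline __m256 loadfp16_sketch(const unsigned short* ptr)
{
    // load 8 packed fp16 values and convert them to 8 fp32 lanes (F16C)
    return _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr));
}

static inline __m256 comp_fmadd_ps_sketch(__m256 a, __m256 b, __m256 c)
{
#ifdef __FMA__
    return _mm256_fmadd_ps(a, b, c); // fused a*b + c in one instruction
#else
    return _mm256_add_ps(_mm256_mul_ps(a, b), c); // mul+add fallback
#endif
}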
fci_contract_nosym.c
/* * Particle permutation symmetry for 2e Hamiltonian only * h2e[i,j,k,l] == h2e[k,l,i,j] * h2e[i,j,k,l] =/= h2e[j,i,k,l] =/= h2e[i,j,l,k] ... */ #include <stdlib.h> #include <string.h> //#include <omp.h> #include "config.h" #include "vhf/fblas.h" #include "fci.h" #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define CSUMTHR 1e-28 #define STRB_BLKSIZE 112 double FCI_t1ci_sf(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb); void FCIcontract_a_1e_nosym(double *h1e, double *ci0, double *ci1, int norb, int nstra, int nstrb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { int j, k, i, a, sign; size_t str0, str1; double *pci0, *pci1; double tmp; _LinkT *tab; _LinkT *clink = malloc(sizeof(_LinkT) * nlinka * nstra); FCIcompress_link(clink, link_indexa, norb, nstra, nlinka); for (str0 = 0; str0 < nstra; str0++) { tab = clink + str0 * nlinka; for (j = 0; j < nlinka; j++) { a = EXTRACT_CRE (tab[j]); // propagate from t1 to bra, through a^+ i i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pci0 = ci0 + str0 * nstrb; pci1 = ci1 + str1 * nstrb; tmp = sign * h1e[a*norb+i]; for (k = 0; k < nstrb; k++) { pci1[k] += tmp * pci0[k]; } } } free(clink); } void FCIcontract_b_1e_nosym(double *h1e, double *ci0, double *ci1, int norb, int nstra, int nstrb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { int j, k, i, a, sign; size_t str0, str1; double *pci1; double tmp; _LinkT *tab; _LinkT *clink = malloc(sizeof(_LinkT) * nlinkb * nstrb); FCIcompress_link(clink, link_indexb, norb, nstrb, nlinkb); for (str0 = 0; str0 < nstra; str0++) { pci1 = ci1 + str0 * nstrb; for (k = 0; k < nstrb; k++) { tab = clink + k * nlinkb; tmp = ci0[str0*nstrb+k]; for (j = 0; j < nlinkb; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); pci1[str1] += sign * tmp * h1e[a*norb+i]; } } } free(clink); } static void spread_a_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinka, _LinkT *clink_indexa) { ci1 += strb_id; const int nnorb = norb * norb; int j, k, i, a, str1, sign; const _LinkT *tab = clink_indexa + stra_id * nlinka; double *cp0, *cp1; for (j = 0; j < nlinka; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); cp0 = t1 + a*norb+i; // propagate from t1 to bra, through a^+ i cp1 = ci1 + str1*(size_t)nstrb; if (sign > 0) { for (k = 0; k < bcount; k++) { cp1[k] += cp0[k*nnorb]; } } else { for (k = 0; k < bcount; k++) { cp1[k] -= cp0[k*nnorb]; } } } } static void spread_b_t1(double *ci1, double *t1, int bcount, int stra_id, int strb_id, int norb, int nstrb, int nlinkb, _LinkT *clink_indexb) { const int nnorb = norb * norb; int j, i, a, str0, str1, sign; const _LinkT *tab = clink_indexb + strb_id * nlinkb; double *pci = ci1 + stra_id * (size_t)nstrb; for (str0 = 0; str0 < bcount; str0++) { for (j = 0; j < nlinkb; j++) { a = EXTRACT_CRE (tab[j]); i = EXTRACT_DES (tab[j]); str1 = EXTRACT_ADDR(tab[j]); sign = EXTRACT_SIGN(tab[j]); // propagate from t1 to bra, through a^+ i pci[str1] += sign * t1[a*norb+i]; } t1 += nnorb; tab += nlinkb; } } static void ctr_rhf2e_kern(double *eri, double *ci0, double *ci1, double *ci1buf, double *t1buf, int bcount_for_spread_a, int ncol_ci1buf, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const
char TRANS_N = 'N'; const double D0 = 0; const double D1 = 1; const int nnorb = norb * norb; double *t1 = t1buf; double *vt1 = t1buf + nnorb*bcount; double csum; csum = FCI_t1ci_sf(ci0, t1, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); if (csum > CSUMTHR) { dgemm_(&TRANS_N, &TRANS_N, &nnorb, &bcount, &nnorb, &D1, eri, &nnorb, t1, &nnorb, &D0, vt1, &nnorb); spread_b_t1(ci1, vt1, bcount, stra_id, strb_id, norb, nb, nlinkb, clink_indexb); spread_a_t1(ci1buf, vt1, bcount_for_spread_a, stra_id, 0, norb, ncol_ci1buf, nlinka, clink_indexa); } } static void axpy2d(double *out, double *in, int count, int no, int ni) { int i, j; for (i = 0; i < count; i++) { for (j = 0; j < ni; j++) { out[i*no+j] += in[i*ni+j]; } } } void FCIcontract_2es1(double *eri, double *ci0, double *ci1, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); memset(ci1, 0, sizeof(double)*na*nb); #pragma omp parallel default(none) \ shared(eri, ci0, ci1, norb, na, nb, nlinka, nlinkb, \ clinka, clinkb) { int strk, ib, blen; double *t1buf = malloc(sizeof(double) * STRB_BLKSIZE*norb*norb*2); double *ci1buf = malloc(sizeof(double) * na*STRB_BLKSIZE); for (ib = 0; ib < nb; ib += STRB_BLKSIZE) { blen = MIN(STRB_BLKSIZE, nb-ib); memset(ci1buf, 0, sizeof(double) * na*blen); #pragma omp for schedule(static) for (strk = 0; strk < na; strk++) { ctr_rhf2e_kern(eri, ci0, ci1, ci1buf, t1buf, blen, blen, blen, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } #pragma omp critical axpy2d(ci1+ib, ci1buf, na, nb, blen); } free(ci1buf); free(t1buf); } free(clinka); free(clinkb); }
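/* Note on threading: FCIcontract_2es1 above avoids write conflicts on ci1 by
 * giving every thread a private ci1buf for the alpha-spread step and merging
 * the buffers inside an omp critical section (axpy2d). A self-contained
 * sketch of that reduction pattern, with illustrative names only: */
#include <stdlib.h>

static void private_buffer_reduce_sketch(double *out, int n)
{
#pragma omp parallel
    {
        int i;
        double *buf = calloc(n, sizeof(double)); // per-thread accumulator
#pragma omp for schedule(static)
        for (i = 0; i < n; i++) {
            buf[i] += 1.0; // stand-in for the real contraction kernel
        }
#pragma omp critical
        for (i = 0; i < n; i++) {
            out[i] += buf[i]; // serialized merge, like axpy2d above
        }
        free(buf);
    }
}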
par_ilu_solve.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * ILU solve routine * *****************************************************************************/ #include "_hypre_parcsr_ls.h" #include "_hypre_utilities.hpp" #include "par_ilu.h" /*-------------------------------------------------------------------- * hypre_ILUSolve *--------------------------------------------------------------------*/ HYPRE_Int hypre_ILUSolve( void *ilu_vdata, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u ) { MPI_Comm comm = hypre_ParCSRMatrixComm(A); // HYPRE_Int i; hypre_ParILUData *ilu_data = (hypre_ParILUData*) ilu_vdata; #ifdef HYPRE_USING_CUDA /* pointers to cusparse data, note that they are not NULL only when needed */ cusparseMatDescr_t matL_des = hypre_ParILUDataMatLMatrixDescription(ilu_data); cusparseMatDescr_t matU_des = hypre_ParILUDataMatUMatrixDescription(ilu_data); void *ilu_solve_buffer = hypre_ParILUDataILUSolveBuffer(ilu_data);//device memory cusparseSolvePolicy_t ilu_solve_policy = hypre_ParILUDataILUSolvePolicy(ilu_data); hypre_CSRMatrix *matALU_d = hypre_ParILUDataMatAILUDevice(ilu_data); hypre_CSRMatrix *matBLU_d = hypre_ParILUDataMatBILUDevice(ilu_data); //hypre_CSRMatrix *matSLU_d = hypre_ParILUDataMatSILUDevice(ilu_data); hypre_CSRMatrix *matE_d = hypre_ParILUDataMatEDevice(ilu_data); hypre_CSRMatrix *matF_d = hypre_ParILUDataMatFDevice(ilu_data); csrsv2Info_t matAL_info = hypre_ParILUDataMatALILUSolveInfo(ilu_data); csrsv2Info_t matAU_info = hypre_ParILUDataMatAUILUSolveInfo(ilu_data); csrsv2Info_t matBL_info = hypre_ParILUDataMatBLILUSolveInfo(ilu_data); csrsv2Info_t matBU_info = hypre_ParILUDataMatBUILUSolveInfo(ilu_data); csrsv2Info_t matSL_info = hypre_ParILUDataMatSLILUSolveInfo(ilu_data); csrsv2Info_t matSU_info = hypre_ParILUDataMatSUILUSolveInfo(ilu_data); hypre_ParCSRMatrix *Aperm = hypre_ParILUDataAperm(ilu_data); //hypre_ParCSRMatrix *R = hypre_ParILUDataR(ilu_data); //hypre_ParCSRMatrix *P = hypre_ParILUDataP(ilu_data); #endif /* get matrices */ HYPRE_Int ilu_type = hypre_ParILUDataIluType(ilu_data); HYPRE_Int *perm = hypre_ParILUDataPerm(ilu_data); HYPRE_Int *qperm = hypre_ParILUDataQPerm(ilu_data); hypre_ParCSRMatrix *matA = hypre_ParILUDataMatA(ilu_data); hypre_ParCSRMatrix *matL = hypre_ParILUDataMatL(ilu_data); HYPRE_Real *matD = hypre_ParILUDataMatD(ilu_data); hypre_ParCSRMatrix *matU = hypre_ParILUDataMatU(ilu_data); #ifndef HYPRE_USING_CUDA hypre_ParCSRMatrix *matmL = hypre_ParILUDataMatLModified(ilu_data); HYPRE_Real *matmD = hypre_ParILUDataMatDModified(ilu_data); hypre_ParCSRMatrix *matmU = hypre_ParILUDataMatUModified(ilu_data); #endif hypre_ParCSRMatrix *matS = hypre_ParILUDataMatS(ilu_data); HYPRE_Int iter, num_procs, my_id; hypre_ParVector *F_array = hypre_ParILUDataF(ilu_data); hypre_ParVector *U_array = hypre_ParILUDataU(ilu_data); /* get settings */ HYPRE_Real tol = hypre_ParILUDataTol(ilu_data); HYPRE_Int logging = hypre_ParILUDataLogging(ilu_data); HYPRE_Int print_level = hypre_ParILUDataPrintLevel(ilu_data); HYPRE_Int max_iter = hypre_ParILUDataMaxIter(ilu_data); HYPRE_Real *norms = hypre_ParILUDataRelResNorms(ilu_data); hypre_ParVector *Ftemp = 
hypre_ParILUDataFTemp(ilu_data); hypre_ParVector *Utemp = hypre_ParILUDataUTemp(ilu_data); hypre_ParVector *Xtemp = hypre_ParILUDataXTemp(ilu_data); hypre_ParVector *Ytemp = hypre_ParILUDataYTemp(ilu_data); HYPRE_Real *fext = hypre_ParILUDataFExt(ilu_data); HYPRE_Real *uext = hypre_ParILUDataUExt(ilu_data); hypre_ParVector *residual; HYPRE_Real alpha = -1; HYPRE_Real beta = 1; HYPRE_Real conv_factor = 0.0; HYPRE_Real resnorm = 1.0; HYPRE_Real init_resnorm = 0.0; HYPRE_Real rel_resnorm; HYPRE_Real rhs_norm = 0.0; HYPRE_Real old_resnorm; HYPRE_Real ieee_check = 0.0; HYPRE_Real operat_cmplxty = hypre_ParILUDataOperatorComplexity(ilu_data); HYPRE_Int Solve_err_flag; #ifdef HYPRE_USING_CUDA HYPRE_Int test_opt; #endif /* problem size */ HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A)); HYPRE_Int nLU = hypre_ParILUDataNLU(ilu_data); HYPRE_Int *u_end = hypre_ParILUDataUEnd(ilu_data); /* Schur system solve */ HYPRE_Solver schur_solver = hypre_ParILUDataSchurSolver(ilu_data); HYPRE_Solver schur_precond = hypre_ParILUDataSchurPrecond(ilu_data); hypre_ParVector *rhs = hypre_ParILUDataRhs(ilu_data); hypre_ParVector *x = hypre_ParILUDataX(ilu_data); /* begin */ HYPRE_ANNOTATE_FUNC_BEGIN; if (logging > 1) { residual = hypre_ParILUDataResidual(ilu_data); } hypre_ParILUDataNumIterations(ilu_data) = 0; hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm, &my_id); /*----------------------------------------------------------------------- * Write the solver parameters *-----------------------------------------------------------------------*/ if (my_id == 0 && print_level > 1) { hypre_ILUWriteSolverParams(ilu_data); } /*----------------------------------------------------------------------- * Initialize the solver error flag *-----------------------------------------------------------------------*/ Solve_err_flag = 0; /*----------------------------------------------------------------------- * write some initial info *-----------------------------------------------------------------------*/ if (my_id == 0 && print_level > 1 && tol > 0.) { hypre_printf("\n\n ILU SOLVER SOLUTION INFO:\n"); } /*----------------------------------------------------------------------- * Compute initial residual and print *-----------------------------------------------------------------------*/ if (print_level > 1 || logging > 1 || tol > 0.) { if ( logging > 1 ) { hypre_ParVectorCopy(f, residual ); if (tol > 0.0) { hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual ); } resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual )); } else { hypre_ParVectorCopy(f, Ftemp); if (tol > 0.0) { hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp); } resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp)); } /* Since it does not diminish performance, attempt to return an error flag and notify users when they supply bad input. */ if (resnorm != 0.) { ieee_check = resnorm / resnorm; /* INF -> NaN conversion */ } if (ieee_check != ieee_check) { /* ...INFs or NaNs in input can make ieee_check a NaN. This test for ieee_check self-equality works on all IEEE-compliant compilers/ machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754" by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */ if (print_level > 0) { hypre_printf("\n\nERROR detected by Hypre ... 
BEGIN\n"); hypre_printf("ERROR -- hypre_ILUSolve: INFs and/or NaNs detected in input.\n"); hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n"); hypre_printf("ERROR detected by Hypre ... END\n\n\n"); } hypre_error(HYPRE_ERROR_GENERIC); HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } init_resnorm = resnorm; rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f)); if (rhs_norm > HYPRE_REAL_EPSILON) { rel_resnorm = init_resnorm / rhs_norm; } else { /* rhs is zero, return a zero solution */ hypre_ParVectorSetConstantValues(U_array, 0.0); if (logging > 0) { rel_resnorm = 0.0; hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm; } HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } } else { rel_resnorm = 1.; } if (my_id == 0 && print_level > 1) { hypre_printf(" relative\n"); hypre_printf(" residual factor residual\n"); hypre_printf(" -------- ------ --------\n"); hypre_printf(" Initial %e %e\n", init_resnorm, rel_resnorm); } matA = A; U_array = u; F_array = f; /************** Main Solver Loop - always do 1 iteration ************/ iter = 0; while ((rel_resnorm >= tol || iter < 1) && iter < max_iter) { /* Do one solve on LUe=r */ switch (ilu_type) { case 0: case 1: #ifdef HYPRE_USING_CUDA /* Apply GPU-accelerated LU solve */ hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy, ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse #else hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ #endif break; case 10: case 11: #ifdef HYPRE_USING_CUDA /* Apply GPU-accelerated LU solve */ hypre_ILUSolveCusparseSchurGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end, matL_des, matU_des, matBL_info, matBU_info, matSL_info, matSU_info, matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer);//GMRES-cusparse #else hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, perm, nLU, matL, matD, matU, matS, Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES #endif break; case 20: case 21: hypre_ILUSolveSchurNSH(matA, F_array, U_array, perm, nLU, matL, matD, matU, matS, Utemp, Ftemp, schur_solver, rhs, x, u_end); //MR+NSH break; case 30: case 31: hypre_ILUSolveLURAS(matA, F_array, U_array, perm, matL, matD, matU, Utemp, Utemp, fext, uext); //RAS break; case 40: case 41: hypre_ILUSolveSchurGMRES(matA, F_array, U_array, perm, qperm, nLU, matL, matD, matU, matS, Utemp, Ftemp, schur_solver, schur_precond, rhs, x, u_end); //GMRES break; case 50: #ifdef HYPRE_USING_CUDA test_opt = hypre_ParILUDataTestOption(ilu_data); hypre_ILUSolveRAPGMRES(matA, F_array, U_array, perm, nLU, matS, Utemp, Ftemp, Xtemp, Ytemp, schur_solver, schur_precond, rhs, x, u_end, matL_des, matU_des, matAL_info, matAU_info, matBL_info, matBU_info, matSL_info, matSU_info, Aperm, matALU_d, matBLU_d, matE_d, matF_d, ilu_solve_policy, ilu_solve_buffer, test_opt);//GMRES-RAP #else hypre_ILUSolveRAPGMRESHOST(matA, F_array, U_array, perm, nLU, matL, matD, matU, matmL, matmD, matmU, Utemp, Ftemp, Xtemp, Ytemp, schur_solver, schur_precond, rhs, x, u_end);//GMRES-RAP #endif break; default: #ifdef HYPRE_USING_CUDA /* Apply GPU-accelerated LU solve */ hypre_ILUSolveCusparseLU(matA, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy, ilu_solve_buffer, F_array, U_array, perm, n, Utemp, Ftemp);//BJ-cusparse #else hypre_ILUSolveLU(matA, F_array, U_array, perm, n, matL, matD, matU, Utemp, Ftemp); //BJ #endif break; } 
/*--------------------------------------------------------------- * Compute residual and residual norm *----------------------------------------------------------------*/ if (print_level > 1 || logging > 1 || tol > 0.) { old_resnorm = resnorm; if ( logging > 1 ) { hypre_ParVectorCopy(F_array, residual); hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual ); resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual )); } else { hypre_ParVectorCopy(F_array, Ftemp); hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp); resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp)); } if (old_resnorm) { conv_factor = resnorm / old_resnorm; } else { conv_factor = resnorm; } if (rhs_norm > HYPRE_REAL_EPSILON) { rel_resnorm = resnorm / rhs_norm; } else { rel_resnorm = resnorm; } norms[iter] = rel_resnorm; } ++iter; hypre_ParILUDataNumIterations(ilu_data) = iter; hypre_ParILUDataFinalRelResidualNorm(ilu_data) = rel_resnorm; if (my_id == 0 && print_level > 1) { hypre_printf(" ILUSolve %2d %e %f %e \n", iter, resnorm, conv_factor, rel_resnorm); } } /* check convergence within max_iter */ if (iter == max_iter && tol > 0.) { Solve_err_flag = 1; hypre_error(HYPRE_ERROR_CONV); } /*----------------------------------------------------------------------- * Print closing statistics * Add operator and grid complexity stats *-----------------------------------------------------------------------*/ if (iter > 0 && init_resnorm) { conv_factor = pow((resnorm / init_resnorm), (1.0 / (HYPRE_Real) iter)); } else { conv_factor = 1.; } if (print_level > 1) { /*** compute operator and grid complexity (fill factor) here ?? ***/ if (my_id == 0) { if (Solve_err_flag == 1) { hypre_printf("\n\n=============================================="); hypre_printf("\n NOTE: Convergence tolerance was not achieved\n"); hypre_printf(" within the allowed %d iterations\n", max_iter); hypre_printf("=============================================="); } hypre_printf("\n\n Average Convergence Factor = %f \n", conv_factor); hypre_printf(" operator = %f\n", operat_cmplxty); } } HYPRE_ANNOTATE_FUNC_END; return hypre_error_flag; } /* Schur Complement solve with GMRES on schur complement * ParCSRMatrix S is already built in the ilu data structure, here directly use S * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local.
* S is the global Schur complement * schur_solver is a GMRES solver * schur_precond is the ILU preconditioner for GMRES * rhs and x are helper vectors for solving the Schur system */ HYPRE_Int hypre_ILUSolveSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int *qperm, HYPRE_Int nLU, hypre_ParCSRMatrix *L, HYPRE_Real* D, hypre_ParCSRMatrix *U, hypre_ParCSRMatrix *S, hypre_ParVector *ftemp, hypre_ParVector *utemp, HYPRE_Solver schur_solver, HYPRE_Solver schur_precond, hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end) { /* data objects for communication */ // MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* data objects for L and U */ hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L); HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag); HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag); HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag); hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U); HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag); HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag); HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag); hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); HYPRE_Real alpha; HYPRE_Real beta; HYPRE_Int i, j, k1, k2, col; /* problem size */ HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag); // HYPRE_Int m = n - nLU; /* other data objects for computation */ // hypre_Vector *f_local; // HYPRE_Real *f_data; hypre_Vector *rhs_local; HYPRE_Real *rhs_data; hypre_Vector *x_local; HYPRE_Real *x_data; /* begin */ beta = 1.0; alpha = -1.0; /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* 1st need to solve LBi*xi = fi * L solve, solve xi put in u_temp upper */ // f_local = hypre_ParVectorLocalVector(f); // f_data = hypre_VectorData(f_local); /* now update with L to solve */ for (i = 0 ; i < nLU ; i ++) { utemp_data[qperm[i]] = ftemp_data[perm[i]]; k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; for (j = k1 ; j < k2 ; j ++) { utemp_data[qperm[i]] -= L_diag_data[j] * utemp_data[qperm[L_diag_j[j]]]; } } /* 2nd need to compute g'i = gi - Ei*UBi^-1*xi * now put g'i into the f_temp lower */ for (i = nLU ; i < n ; i ++) { k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; for (j = k1 ; j < k2 ; j ++) { col = L_diag_j[j]; ftemp_data[perm[i]] -= L_diag_data[j] * utemp_data[qperm[col]]; } } /* 3rd need to solve global Schur Complement Sy = g' * for now only solve the local system * solve y put in u_temp lower * only solve when S is not NULL */ if (S) { /*initialize solution to zero for residual equation */ hypre_ParVectorSetConstantValues(x, 0.0); /* setup vectors for solve */ rhs_local = hypre_ParVectorLocalVector(rhs); rhs_data = hypre_VectorData(rhs_local); x_local = hypre_ParVectorLocalVector(x); x_data = hypre_VectorData(x_local); /* set rhs value */ for (i = nLU ; i < n ; i ++) { rhs_data[i - nLU] = ftemp_data[perm[i]]; } /* solve */ HYPRE_GMRESSolve(schur_solver, (HYPRE_Matrix)S, (HYPRE_Vector)rhs, (HYPRE_Vector)x); /* copy value back to original */ for (i = nLU ; i < n ; i ++) { utemp_data[qperm[i]] = x_data[i - nLU]; } } /* 4th need to compute zi = xi - LBi^-1*Fi*yi * put zi in f_temp upper * only do this computation when nLU < n * U is unsorted, search is expensive when unnecessary */ if (nLU < n) { for (i = 0 ; i < nLU ; i ++) { ftemp_data[perm[i]] = utemp_data[qperm[i]]; k1 = u_end[i] ; k2 = U_diag_i[i + 1]; for (j = k1 ; j < 
k2 ; j ++) { col = U_diag_j[j]; ftemp_data[perm[i]] -= U_diag_data[j] * utemp_data[qperm[col]]; } } for (i = 0 ; i < nLU ; i ++) { utemp_data[qperm[i]] = ftemp_data[perm[i]]; } } /* 5th need to solve UBi*ui = zi */ /* put result in u_temp upper */ for (i = nLU - 1 ; i >= 0 ; i --) { k1 = U_diag_i[i] ; k2 = u_end[i]; for (j = k1 ; j < k2 ; j ++) { col = U_diag_j[j]; utemp_data[qperm[i]] -= U_diag_data[j] * utemp_data[qperm[col]]; } utemp_data[qperm[i]] *= D[i]; } /* done, now everything is in u_temp, update solution */ hypre_ParVectorAxpy(beta, utemp, u); return hypre_error_flag; } /* Newton-Schulz-Hotelling solve * ParCSRMatrix S is already built in the ilu data structure * S here is the INVERSE of Schur Complement * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local. * S is the inverse global Schur complement * rhs and x are helper vectors for solving the Schur system */ HYPRE_Int hypre_ILUSolveSchurNSH(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int nLU, hypre_ParCSRMatrix *L, HYPRE_Real* D, hypre_ParCSRMatrix *U, hypre_ParCSRMatrix *S, hypre_ParVector *ftemp, hypre_ParVector *utemp, HYPRE_Solver schur_solver, hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end) { /* data objects for communication */ // MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* data objects for L and U */ hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L); HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag); HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag); HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag); hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U); HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag); HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag); HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag); hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); HYPRE_Real alpha; HYPRE_Real beta; HYPRE_Int i, j, k1, k2, col; /* problem size */ HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag); // HYPRE_Int m = n - nLU; /* other data objects for computation */ // hypre_Vector *f_local; // HYPRE_Real *f_data; hypre_Vector *rhs_local; HYPRE_Real *rhs_data; hypre_Vector *x_local; HYPRE_Real *x_data; /* begin */ beta = 1.0; alpha = -1.0; /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* 1st need to solve LBi*xi = fi * L solve, solve xi put in u_temp upper */ // f_local = hypre_ParVectorLocalVector(f); // f_data = hypre_VectorData(f_local); /* now update with L to solve */ for (i = 0 ; i < nLU ; i ++) { utemp_data[perm[i]] = ftemp_data[perm[i]]; k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; for (j = k1 ; j < k2 ; j ++) { utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[L_diag_j[j]]]; } } /* 2nd need to compute g'i = gi - Ei*UBi^-1*xi * now put g'i into the f_temp lower */ for (i = nLU ; i < n ; i ++) { k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; for (j = k1 ; j < k2 ; j ++) { col = L_diag_j[j]; ftemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[col]]; } } /* 3rd need to solve global Schur Complement Sy = g' * for now only solve the local system * solve y put in u_temp lower * only solve when S is not NULL */ if (S) { /*initialize solution to zero for residual equation */ hypre_ParVectorSetConstantValues(x, 0.0); /* setup vectors for solve 
*/ rhs_local = hypre_ParVectorLocalVector(rhs); rhs_data = hypre_VectorData(rhs_local); x_local = hypre_ParVectorLocalVector(x); x_data = hypre_VectorData(x_local); /* set rhs value */ for (i = nLU ; i < n ; i ++) { rhs_data[i - nLU] = ftemp_data[perm[i]]; } /* Solve Schur system with approx inverse * x = S*rhs */ hypre_NSHSolve(schur_solver, S, rhs, x); /* copy value back to original */ for (i = nLU ; i < n ; i ++) { utemp_data[perm[i]] = x_data[i - nLU]; } } /* 4th need to compute zi = xi - LBi^-1*yi * put zi in f_temp upper * only do this computation when nLU < n * U is unsorted, search is expensive when unnecessary */ if (nLU < n) { for (i = 0 ; i < nLU ; i ++) { ftemp_data[perm[i]] = utemp_data[perm[i]]; k1 = u_end[i] ; k2 = U_diag_i[i + 1]; for (j = k1 ; j < k2 ; j ++) { col = U_diag_j[j]; ftemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]]; } } for (i = 0 ; i < nLU ; i ++) { utemp_data[perm[i]] = ftemp_data[perm[i]]; } } /* 5th need to solve UBi*ui = zi */ /* put result in u_temp upper */ for (i = nLU - 1 ; i >= 0 ; i --) { k1 = U_diag_i[i] ; k2 = u_end[i]; for (j = k1 ; j < k2 ; j ++) { col = U_diag_j[j]; utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]]; } utemp_data[perm[i]] *= D[i]; } /* done, now everything is in u_temp, update solution */ hypre_ParVectorAxpy(beta, utemp, u); return hypre_error_flag; } /* Incomplete LU solve * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local. */ HYPRE_Int hypre_ILUSolveLU(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int nLU, hypre_ParCSRMatrix *L, HYPRE_Real* D, hypre_ParCSRMatrix *U, hypre_ParVector *ftemp, hypre_ParVector *utemp) { hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L); HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag); HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag); HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag); hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U); HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag); HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag); HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag); hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); HYPRE_Real alpha; HYPRE_Real beta; HYPRE_Int i, j, k1, k2; /* begin */ alpha = -1.0; beta = 1.0; /* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized * vector operations in the case where sizeof(L, D or U) < sizeof(A) */ //hypre_ParVectorSetConstantValues( utemp, 0.); /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* L solve - Forward solve */ /* copy rhs to account for diagonal of L (which is identity) */ for ( i = 0; i < nLU; i++ ) { utemp_data[perm[i]] = ftemp_data[perm[i]]; } /* update with remaining (off-diagonal) entries of L */ for ( i = 0; i < nLU; i++ ) { k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; for (j = k1; j < k2; j++) { utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[L_diag_j[j]]]; } } /*-------------------- U solve - Backward substitution */ for ( i = nLU - 1; i >= 0; i-- ) { /* first update with the remaining (off-diagonal) entries of U */ k1 = U_diag_i[i] ; k2 = U_diag_i[i + 1]; for (j = k1; j < k2; j++) { utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[U_diag_j[j]]]; } /* diagonal scaling (contribution from D. Note: D is stored as its inverse) */ utemp_data[perm[i]] *= D[i]; } /* Update solution */ hypre_ParVectorAxpy(beta, utemp, u); return hypre_error_flag; } /* Incomplete LU solve RAS * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local. * fext and uext are temporary arrays for external data */ HYPRE_Int hypre_ILUSolveLURAS(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, hypre_ParCSRMatrix *L, HYPRE_Real* D, hypre_ParCSRMatrix *U, hypre_ParVector *ftemp, hypre_ParVector *utemp, HYPRE_Real *fext, HYPRE_Real *uext) { hypre_ParCSRCommPkg *comm_pkg; hypre_ParCSRCommHandle *comm_handle; HYPRE_Int num_sends, begin, end; hypre_CSRMatrix *L_diag = hypre_ParCSRMatrixDiag(L); HYPRE_Real *L_diag_data = hypre_CSRMatrixData(L_diag); HYPRE_Int *L_diag_i = hypre_CSRMatrixI(L_diag); HYPRE_Int *L_diag_j = hypre_CSRMatrixJ(L_diag); hypre_CSRMatrix *U_diag = hypre_ParCSRMatrixDiag(U); HYPRE_Real *U_diag_data = hypre_CSRMatrixData(U_diag); HYPRE_Int *U_diag_i = hypre_CSRMatrixI(U_diag); HYPRE_Int *U_diag_j = hypre_CSRMatrixJ(U_diag); HYPRE_Int n = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A)); HYPRE_Int m = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)); // HYPRE_Int buffer_size; HYPRE_Int n_total = m + n; HYPRE_Int idx; HYPRE_Int jcol; HYPRE_Int col; hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); HYPRE_Real alpha; HYPRE_Real beta; HYPRE_Int i, j, k1, k2; /* begin */ alpha = -1.0; beta = 1.0; /* prepare for communication */ comm_pkg = hypre_ParCSRMatrixCommPkg(A); /* setup if not yet built */ if (!comm_pkg) { hypre_MatvecCommPkgCreate(A); comm_pkg = hypre_ParCSRMatrixCommPkg(A); } /* Initialize Utemp to zero.
* This is necessary for correctness, when we use optimized * vector operations in the case where sizeof(L, D or U) < sizeof(A) */ //hypre_ParVectorSetConstantValues( utemp, 0.); /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* communication to get external data */ /* get total num of send */ num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); begin = hypre_ParCSRCommPkgSendMapStart(comm_pkg, 0); end = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends); /* copy new index into send_buf */ for (i = begin ; i < end ; i ++) { /* all we need is just send out data, we don't need to worry about the * permutation of offd part, actually we don't need to worry about * permutation at all * borrow uext as send buffer . */ uext[i - begin] = ftemp_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i)]; } /* main communication */ comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, uext, fext); hypre_ParCSRCommHandleDestroy(comm_handle); /* L solve - Forward solve */ for ( i = 0 ; i < n_total ; i ++) { k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1]; if ( i < n ) { /* diag part */ utemp_data[perm[i]] = ftemp_data[perm[i]]; for (j = k1; j < k2; j++) { col = L_diag_j[j]; if ( col < n ) { utemp_data[perm[i]] -= L_diag_data[j] * utemp_data[perm[col]]; } else { jcol = col - n; utemp_data[perm[i]] -= L_diag_data[j] * uext[jcol]; } } } else { /* offd part */ idx = i - n; uext[idx] = fext[idx]; for (j = k1; j < k2; j++) { col = L_diag_j[j]; if (col < n) { uext[idx] -= L_diag_data[j] * utemp_data[perm[col]]; } else { jcol = col - n; uext[idx] -= L_diag_data[j] * uext[jcol]; } } } } /*-------------------- U solve - Backward substitution */ for ( i = n_total - 1; i >= 0; i-- ) { /* first update with the remaining (off-diagonal) entries of U */ k1 = U_diag_i[i] ; k2 = U_diag_i[i + 1]; if ( i < n ) { /* diag part */ for (j = k1; j < k2; j++) { col = U_diag_j[j]; if ( col < n ) { utemp_data[perm[i]] -= U_diag_data[j] * utemp_data[perm[col]]; } else { jcol = col - n; utemp_data[perm[i]] -= U_diag_data[j] * uext[jcol]; } } /* diagonal scaling (contribution from D. Note: D is stored as its inverse) */ utemp_data[perm[i]] *= D[i]; } else { /* 2nd part of offd */ idx = i - n; for (j = k1; j < k2; j++) { col = U_diag_j[j]; if ( col < n ) { uext[idx] -= U_diag_data[j] * utemp_data[perm[col]]; } else { jcol = col - n; uext[idx] -= U_diag_data[j] * uext[jcol]; } } /* diagonal scaling (contribution from D. 
Note: D is stored as its inverse) */ uext[idx] *= D[i]; } } /* Update solution */ hypre_ParVectorAxpy(beta, utemp, u); return hypre_error_flag; } #ifdef HYPRE_USING_CUDA /* Permutation function (for GPU version, can just call thrust) * option 00: perm integer array * option 01: rperm integer array * option 10: perm real array * option 11: rperm real array * */ HYPRE_Int hypre_ILUSeqVectorPerm(void *vectori, void *vectoro, HYPRE_Int size, HYPRE_Int *perm, HYPRE_Int option) { cudaDeviceSynchronize(); HYPRE_Int i; switch (option) { case 00: { HYPRE_Int *ivectori = (HYPRE_Int *) vectori; HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro; for (i = 0 ; i < size ; i ++) { ivectoro[i] = ivectori[perm[i]]; } break; } case 01: { HYPRE_Int *ivectori = (HYPRE_Int *) vectori; HYPRE_Int *ivectoro = (HYPRE_Int *) vectoro; for (i = 0 ; i < size ; i ++) { ivectoro[perm[i]] = ivectori[i]; } break; } case 10: { HYPRE_Real *dvectori = (HYPRE_Real *) vectori; HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro; for (i = 0 ; i < size ; i ++) { dvectoro[i] = dvectori[perm[i]]; } break; } case 11: { HYPRE_Real *dvectori = (HYPRE_Real *) vectori; HYPRE_Real *dvectoro = (HYPRE_Real *) vectoro; for (i = 0 ; i < size ; i ++) { dvectoro[perm[i]] = dvectori[i]; } break; } default: { printf("Error option in ILUSeqVectorPerm"); hypre_assert(1 == 0); } } return hypre_error_flag; } /* Incomplete LU solve (GPU) * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local. */ HYPRE_Int hypre_ILUSolveCusparseLU(hypre_ParCSRMatrix *A, cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des, csrsv2Info_t matL_info, csrsv2Info_t matU_info, hypre_CSRMatrix *matLU_d, cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int n, hypre_ParVector *ftemp, hypre_ParVector *utemp) { /* Only solve when we have stuffs to be solved */ if (n == 0) { return hypre_error_flag; } /* ILU data */ HYPRE_Real *LU_data = hypre_CSRMatrixData(matLU_d); HYPRE_Int *LU_i = hypre_CSRMatrixI(matLU_d); HYPRE_Int *LU_j = hypre_CSRMatrixJ(matLU_d); HYPRE_Int nnz = LU_i[n]; hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); HYPRE_Real alpha; HYPRE_Real beta; //HYPRE_Int i, j, k1, k2; HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double); HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2; hypre_assert(isDoublePrecision || isSinglePrecision); /* begin */ alpha = -1.0; beta = 1.0; cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle()); /* Initialize Utemp to zero. 
* This is necessary for correctness, when we use optimized * vector operations in the case where sizeof(L, D or U) < sizeof(A) */ //hypre_ParVectorSetConstantValues( utemp, 0.); /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* apply permutation */ HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data); if (isDoublePrecision) { /* L solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, (hypre_double *) &beta, matL_des, (hypre_double *) LU_data, LU_i, LU_j, matL_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer)); /* U solve - Backward substitution */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, (hypre_double *) &beta, matU_des, (hypre_double *) LU_data, LU_i, LU_j, matU_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer)); } else if (isSinglePrecision) { /* L solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, (float *) &beta, matL_des, (float *) LU_data, LU_i, LU_j, matL_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer)); /* U solve - Backward substitution */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, n, nnz, (float *) &beta, matU_des, (float *) LU_data, LU_i, LU_j, matU_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer)); } /* apply reverse permutation */ HYPRE_THRUST_CALL(scatter, utemp_data, utemp_data + n, perm, ftemp_data); /* Update solution */ hypre_ParVectorAxpy(beta, ftemp, u); return hypre_error_flag; } /* Schur Complement solve with GMRES on schur complement * ParCSRMatrix S is already built in ilu data sturcture, here directly use S * L, D and U factors only have local scope (no off-diagonal processor terms) * so apart from the residual calculation (which uses A), the solves with the * L and U factors are local. 
* S is the global Schur complement * schur_solver is a GMRES solver * schur_precond is the ILU preconditioner for GMRES * rhs and x are helper vector for solving Schur system */ HYPRE_Int hypre_ILUSolveCusparseSchurGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u, HYPRE_Int *perm, HYPRE_Int nLU, hypre_ParCSRMatrix *S, hypre_ParVector *ftemp, hypre_ParVector *utemp, HYPRE_Solver schur_solver, HYPRE_Solver schur_precond, hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end, cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des, csrsv2Info_t matBL_info, csrsv2Info_t matBU_info, csrsv2Info_t matSL_info, csrsv2Info_t matSU_info, hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d, hypre_CSRMatrix *matF_d, cusparseSolvePolicy_t ilu_solve_policy, void *ilu_solve_buffer) { /* If we don't have S block, just do one L solve and one U solve */ if (!S) { /* Just call BJ cusparse and return */ return hypre_ILUSolveCusparseLU(A, matL_des, matU_des, matBL_info, matBU_info, matBLU_d, ilu_solve_policy, ilu_solve_buffer, f, u, perm, nLU, ftemp, utemp); } /* data objects for communication */ // MPI_Comm comm = hypre_ParCSRMatrixComm(A); /* data objects for temp vector */ hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp); HYPRE_Real *utemp_data = hypre_VectorData(utemp_local); hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp); HYPRE_Real *ftemp_data = hypre_VectorData(ftemp_local); hypre_Vector *rhs_local = hypre_ParVectorLocalVector(rhs); HYPRE_Real *rhs_data = hypre_VectorData(rhs_local); hypre_Vector *x_local = hypre_ParVectorLocalVector(x); HYPRE_Real *x_data = hypre_VectorData(x_local); HYPRE_Real alpha; HYPRE_Real beta; //HYPRE_Real gamma; //HYPRE_Int i, j, k1, k2, col; /* problem size */ HYPRE_Int *BLU_i = NULL; HYPRE_Int *BLU_j = NULL; HYPRE_Real *BLU_data = NULL; HYPRE_Int BLU_nnz = 0; hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S); HYPRE_Int *SLU_i = hypre_CSRMatrixI(matSLU_d); HYPRE_Int *SLU_j = hypre_CSRMatrixJ(matSLU_d); HYPRE_Real *SLU_data = hypre_CSRMatrixData(matSLU_d); HYPRE_Int m = hypre_CSRMatrixNumRows(matSLU_d); HYPRE_Int n = nLU + m; HYPRE_Int SLU_nnz = SLU_i[m]; hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU); hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m); hypre_VectorOwnsData(ftemp_upper) = 0; hypre_VectorOwnsData(utemp_lower) = 0; hypre_VectorData(ftemp_upper) = ftemp_data; hypre_VectorData(utemp_lower) = utemp_data + nLU; hypre_SeqVectorInitialize(ftemp_upper); hypre_SeqVectorInitialize(utemp_lower); if ( nLU > 0) { BLU_i = hypre_CSRMatrixI(matBLU_d); BLU_j = hypre_CSRMatrixJ(matBLU_d); BLU_data = hypre_CSRMatrixData(matBLU_d); BLU_nnz = BLU_i[nLU]; } /* begin */ beta = 1.0; alpha = -1.0; //gamma = 0.0; HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double); HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2; hypre_assert(isDoublePrecision || isSinglePrecision); cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle()); /* compute residual */ hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp); /* 1st need to solve LBi*xi = fi * L solve, solve xi put in u_temp upper */ /* apply permutation before we can start our solve */ HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data); if (nLU > 0) { /* This solve won't touch data in utemp, thus, gi is still in utemp_lower */ if (isDoublePrecision) { /* L solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, nLU, BLU_nnz, (hypre_double 
*) &beta, matL_des, (hypre_double *) BLU_data, BLU_i, BLU_j, matBL_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer)); } else if (isSinglePrecision) { /* L solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, nLU, BLU_nnz, (float *) &beta, matL_des, (float *) BLU_data, BLU_i, BLU_j, matBL_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer)); } /* 2nd need to compute g'i = gi - Ei*UBi^{-1}*xi * Ei*UBi^{-1} is exactly the matE_d here * Now: LBi^{-1}f_i is in ftemp_upper * gi' is in utemp_lower */ hypre_CSRMatrixMatvec(alpha, matE_d, ftemp_upper, beta, utemp_lower); } /* 3rd need to solve global Schur Complement M^{-1}Sy = M^{-1}g' * for now only solve the local system * solve y put in u_temp lower * only solve whe S is not NULL */ /* setup vectors for solve * rhs = M^{-1}g' */ if (m > 0) { if (isDoublePrecision) { /* L solve */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, SLU_nnz, (hypre_double *) &beta, matL_des, (hypre_double *) SLU_data, SLU_i, SLU_j, matSL_info, (hypre_double *) utemp_data + nLU, (hypre_double *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer)); /* U solve */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, SLU_nnz, (hypre_double *) &beta, matU_des, (hypre_double *) SLU_data, SLU_i, SLU_j, matSU_info, (hypre_double *) ftemp_data + nLU, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer)); } else if (isSinglePrecision) { /* L solve */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, SLU_nnz, (float *) &beta, matL_des, (float *) SLU_data, SLU_i, SLU_j, matSL_info, (float *) utemp_data + nLU, (float *) ftemp_data + nLU, ilu_solve_policy, ilu_solve_buffer)); /* U solve */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, m, SLU_nnz, (float *) &beta, matU_des, (float *) SLU_data, SLU_i, SLU_j, matSU_info, (float *) ftemp_data + nLU, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer)); } } /* solve */ /* with tricky initial guess */ //hypre_Vector *tv = hypre_ParVectorLocalVector(x); //HYPRE_Real *tz = hypre_VectorData(tv); HYPRE_GMRESSolve(schur_solver, (HYPRE_Matrix)schur_precond, (HYPRE_Vector)rhs, (HYPRE_Vector)x); /* 4th need to compute zi = xi - LBi^-1*yi * put zi in f_temp upper * only do this computation when nLU < n * U is unsorted, search is expensive when unnecessary */ if (nLU > 0) { hypre_CSRMatrixMatvec(alpha, matF_d, x_local, beta, ftemp_upper); /* 5th need to solve UBi*ui = zi */ /* put result in u_temp upper */ if (isDoublePrecision) { /* U solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, nLU, BLU_nnz, (hypre_double *) &beta, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j, matBU_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer)); } else if (isSinglePrecision) { /* U solve - Forward solve */ HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, nLU, BLU_nnz, (float *) &beta, matU_des, (float *) BLU_data, BLU_i, BLU_j, matBU_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer)); } } /* copy lower part solution into u_temp as well */ hypre_TMemcpy(utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); /* perm back */ HYPRE_THRUST_CALL(scatter, 
   utemp_data, utemp_data + n, perm, ftemp_data);
   /* done; the result is now in ftemp, update the solution */
   hypre_ParVectorAxpy(beta, ftemp, u);

   hypre_SeqVectorDestroy(ftemp_upper);
   hypre_SeqVectorDestroy(utemp_lower);

   return hypre_error_flag;
}

/* Schur complement solve with GMRES on the Schur complement, RAP style.
 * The ParCSRMatrix S is already built in the ILU data structure, so S is used directly here.
 * The L, D and U factors only have local scope (no off-diagonal processor terms),
 * so apart from the residual calculation (which uses A), the solves with the
 * L and U factors are local.
 * S is the global Schur complement.
 * schur_solver is a GMRES solver.
 * schur_precond is the ILU preconditioner for GMRES.
 * rhs and x are helper vectors for solving the Schur system.
 */
HYPRE_Int
hypre_ILUSolveRAPGMRES(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                       HYPRE_Int *perm, HYPRE_Int nLU, hypre_ParCSRMatrix *S,
                       hypre_ParVector *ftemp, hypre_ParVector *utemp,
                       hypre_ParVector *xtemp, hypre_ParVector *ytemp,
                       HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
                       hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end,
                       cusparseMatDescr_t matL_des, cusparseMatDescr_t matU_des,
                       csrsv2Info_t matAL_info, csrsv2Info_t matAU_info,
                       csrsv2Info_t matBL_info, csrsv2Info_t matBU_info,
                       csrsv2Info_t matSL_info, csrsv2Info_t matSU_info,
                       hypre_ParCSRMatrix *Aperm, hypre_CSRMatrix *matALU_d,
                       hypre_CSRMatrix *matBLU_d, hypre_CSRMatrix *matE_d,
                       hypre_CSRMatrix *matF_d, cusparseSolvePolicy_t ilu_solve_policy,
                       void *ilu_solve_buffer, HYPRE_Int test_opt)
{
   /* data objects for communication */
   // MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* data objects for temp vectors */
   hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
   HYPRE_Real   *utemp_data  = hypre_VectorData(utemp_local);
   hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
   HYPRE_Real   *ftemp_data  = hypre_VectorData(ftemp_local);
   hypre_Vector *xtemp_local = hypre_ParVectorLocalVector(xtemp);
   HYPRE_Real   *xtemp_data  = hypre_VectorData(xtemp_local);
   //hypre_Vector *ytemp_local = hypre_ParVectorLocalVector(ytemp);
   //HYPRE_Real   *ytemp_data  = hypre_VectorData(ytemp_local);
   hypre_Vector *rhs_local   = hypre_ParVectorLocalVector(rhs);
   HYPRE_Real   *rhs_data    = hypre_VectorData(rhs_local);
   hypre_Vector *x_local     = hypre_ParVectorLocalVector(x);
   HYPRE_Real   *x_data      = hypre_VectorData(x_local);

   //HYPRE_Int i, j, k1, k2, col;

   /* problem size */
   HYPRE_Int       *ALU_i    = hypre_CSRMatrixI(matALU_d);
   HYPRE_Int       *ALU_j    = hypre_CSRMatrixJ(matALU_d);
   HYPRE_Real      *ALU_data = hypre_CSRMatrixData(matALU_d);
   HYPRE_Int       *BLU_i    = hypre_CSRMatrixI(matBLU_d);
   HYPRE_Int       *BLU_j    = hypre_CSRMatrixJ(matBLU_d);
   HYPRE_Real      *BLU_data = hypre_CSRMatrixData(matBLU_d);
   HYPRE_Int        BLU_nnz  = BLU_i[nLU];
   hypre_CSRMatrix *matSLU_d = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int       *SLU_i    = hypre_CSRMatrixI(matSLU_d);
   HYPRE_Int       *SLU_j    = hypre_CSRMatrixJ(matSLU_d);
   HYPRE_Real      *SLU_data = hypre_CSRMatrixData(matSLU_d);
   HYPRE_Int        m        = hypre_CSRMatrixNumRows(matSLU_d);
   HYPRE_Int        n        = nLU + m;
   HYPRE_Int        SLU_nnz  = SLU_i[m];
   HYPRE_Int        ALU_nnz  = ALU_i[n];

   hypre_Vector *ftemp_upper = hypre_SeqVectorCreate(nLU);
   hypre_Vector *utemp_lower = hypre_SeqVectorCreate(m);
   hypre_VectorOwnsData(ftemp_upper) = 0;
   hypre_VectorOwnsData(utemp_lower) = 0;
   hypre_VectorData(ftemp_upper)     = ftemp_data;
   hypre_VectorData(utemp_lower)     = utemp_data + nLU;
   hypre_SeqVectorInitialize(ftemp_upper);
   hypre_SeqVectorInitialize(utemp_lower);

   /* begin */
   HYPRE_Real one  = 1.0;
   HYPRE_Real mone = -1.0;
   HYPRE_Real zero = 0.0;

   HYPRE_Int isDoublePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double);
   HYPRE_Int isSinglePrecision = sizeof(HYPRE_Complex) == sizeof(hypre_double) / 2;
   hypre_assert(isDoublePrecision || isSinglePrecision);

   cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());

   switch (test_opt)
   {
      case 1: case 3:
      {
         /* E and F */
         /* compute residual */
         hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, utemp);

         /* apply permutation before we can start our solve
          * Au = f -> (PAQ)Q'u = Pf
          */
         HYPRE_THRUST_CALL(gather, perm, perm + n, utemp_data, ftemp_data);

         /* A-smoothing
          * x = [UA\(LA\(P*f_u))], filled into xtemp
          */
         if (n > 0)
         {
            if (isDoublePrecision)
            {
               /* L solve - Forward solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) ALU_data, ALU_i, ALU_j,
                     matAL_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               /* U solve - Backward solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) ALU_data, ALU_i, ALU_j,
                     matAU_info, (hypre_double *) utemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            else if (isSinglePrecision)
            {
               /* L solve - Forward solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (float *) &one, matL_des, (float *) ALU_data, ALU_i, ALU_j,
                     matAL_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               /* U solve - Backward solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (float *) &one, matU_des, (float *) ALU_data, ALU_i, ALU_j,
                     matAU_info, (float *) utemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
         }

         /* residual; we should not touch xtemp for now
          * r = R*(f - PAQx), where the (nontrivial) R is applied below
          */
         hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, ftemp);

         /* copy partial data in */
         hypre_TMemcpy(rhs_data, ftemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

         /* solve L^{-1} */
         if (nLU > 0)
         {
            if (isDoublePrecision)
            {
               /* L solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                     matBL_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            else if (isSinglePrecision)
            {
               /* L solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (float *) &one, matL_des, (float *) BLU_data, BLU_i, BLU_j,
                     matBL_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            /* -U^{-1}L^{-1} */
            if (isDoublePrecision)
            {
               /* U solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                     matBU_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            else if (isSinglePrecision)
            {
               /* U solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (float *) &one, matU_des, (float *) BLU_data, BLU_i, BLU_j,
                     matBU_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
         }

         /* -EU^{-1}L^{-1} */
         hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);

         /* now solve S */
         if (S)
         {
            /* if we have a Schur complement */
            hypre_ParVectorSetConstantValues(x, 0.0);
            HYPRE_GMRESSolve(schur_solver, (HYPRE_Matrix)schur_precond, (HYPRE_Vector)rhs, (HYPRE_Vector)x);

            /* u = xtemp + P*x */
            /* -Fx */
            hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
            /* -L^{-1}Fx */
            if (nLU > 0)
            {
               if (isDoublePrecision)
               {
                  /* L solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBL_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* L solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matL_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBL_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               /* -U^{-1}L^{-1}Fx */
               if (isDoublePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matU_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }
            /* now copy data to y_lower */
            hypre_TMemcpy(ftemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
            /* correction to the residual */
            hypre_ParVectorAxpy(one, ftemp, xtemp);
         }
         else
         {
            /* otherwise just apply triangular solves */
            if (m > 0)
            {
               if (isDoublePrecision)
               {
                  /* L solve - Forward solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) SLU_data, SLU_i, SLU_j,
                        matSL_info, (hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* L solve - Forward solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (float *) &one, matL_des, (float *) SLU_data, SLU_i, SLU_j,
                        matSL_info, (float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
               }
               if (isDoublePrecision)
               {
                  /* U solve - Backward solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) SLU_data, SLU_i, SLU_j,
                        matSU_info, (hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve - Backward solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (float *) &one, matU_des, (float *) SLU_data, SLU_i, SLU_j,
                        matSU_info, (float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }

            /* u = xtemp + P*x */
            /* -Fx */
            hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
            /* -L^{-1}Fx */
            if (nLU > 0)
            {
               if (isDoublePrecision)
               {
                  /* L solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBL_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* L solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matL_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBL_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               /* -U^{-1}L^{-1}Fx */
               if (isDoublePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matU_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }
            /* now copy data to y_lower */
            hypre_TMemcpy(ftemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
            hypre_ParVectorAxpy(one, ftemp, xtemp);
         }

         /* perm back */
         HYPRE_THRUST_CALL(scatter, xtemp_data, xtemp_data + n, perm, ftemp_data);

         /* done; the result is now in ftemp, update the solution */
         hypre_ParVectorAxpy(one, ftemp, u);
      }
      break;

      case 0: case 2: default:
      {
         /* EU^{-1} and L^{-1}F */
         /* compute residual */
         hypre_ParCSRMatrixMatvecOutOfPlace(mone, A, u, one, f, ftemp);

         /* apply permutation before we can start our solve
          * Au = f -> (PAQ)Q'u = Pf
          */
         HYPRE_THRUST_CALL(gather, perm, perm + n, ftemp_data, utemp_data);

         /* A-smoothing
          * x = [UA\(LA\(P*f_u))], filled into xtemp
          */
         if (n > 0)
         {
            if (isDoublePrecision)
            {
               /* L solve - Forward solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) ALU_data, ALU_i, ALU_j,
                     matAL_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               /* U solve - Backward solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) ALU_data, ALU_i, ALU_j,
                     matAU_info, (hypre_double *) ftemp_data, (hypre_double *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            else if (isSinglePrecision)
            {
               /* L solve - Forward solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (float *) &one, matL_des, (float *) ALU_data, ALU_i, ALU_j,
                     matAL_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
               /* U solve - Backward solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     n, ALU_nnz, (float *) &one, matU_des, (float *) ALU_data, ALU_i, ALU_j,
                     matAU_info, (float *) ftemp_data, (float *) xtemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
         }

         /* residual; we should not touch xtemp for now
          * r = R*(f - PAQx), where the (nontrivial) R is applied below
          */
         hypre_ParCSRMatrixMatvec(mone, Aperm, xtemp, one, utemp);

         /* copy partial data in */
         hypre_TMemcpy(rhs_data, utemp_data + nLU, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);

         /* solve L^{-1} */
         if (nLU > 0)
         {
            if (isDoublePrecision)
            {
               /* L solve */
               HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                     matBL_info, (hypre_double *) utemp_data, (hypre_double *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
            else if (isSinglePrecision)
            {
               /* L solve */
               HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                     nLU, BLU_nnz, (float *) &one, matL_des, (float *) BLU_data, BLU_i, BLU_j,
                     matBL_info, (float *) utemp_data, (float *) ftemp_data, ilu_solve_policy, ilu_solve_buffer));
            }
         }

         /* -EU^{-1}L^{-1} */
         hypre_CSRMatrixMatvec(mone, matE_d, ftemp_upper, one, rhs_local);

         /* now solve S */
         if (S)
         {
            /* if we have a Schur complement */
            hypre_ParVectorSetConstantValues(x, 0.0);
            HYPRE_GMRESSolve(schur_solver, (HYPRE_Matrix)schur_precond, (HYPRE_Vector)rhs, (HYPRE_Vector)x);

            /* u = xtemp + P*x */
            /* -L^{-1}Fx */
            hypre_CSRMatrixMatvec(mone, matF_d, x_local, zero, ftemp_upper);
            /* -U^{-1}L^{-1}Fx */
            if (nLU > 0)
            {
               if (isDoublePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matU_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }
            /* now copy data to y_lower */
            hypre_TMemcpy(utemp_data + nLU, x_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
            hypre_ParVectorAxpy(one, utemp, xtemp);
         }
         else
         {
            /* otherwise just apply triangular solves */
            if (m > 0)
            {
               if (isDoublePrecision)
               {
                  /* L solve - Forward solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (hypre_double *) &one, matL_des, (hypre_double *) SLU_data, SLU_i, SLU_j,
                        matSL_info, (hypre_double *) rhs_data, (hypre_double *) x_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* L solve - Forward solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (float *) &one, matL_des, (float *) SLU_data, SLU_i, SLU_j,
                        matSL_info, (float *) rhs_data, (float *) x_data, ilu_solve_policy, ilu_solve_buffer));
               }
               if (isDoublePrecision)
               {
                  /* U solve - Backward solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) SLU_data, SLU_i, SLU_j,
                        matSU_info, (hypre_double *) x_data, (hypre_double *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve - Backward solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        m, SLU_nnz, (float *) &one, matU_des, (float *) SLU_data, SLU_i, SLU_j,
                        matSU_info, (float *) x_data, (float *) rhs_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }

            /* u = xtemp + P*x */
            /* -L^{-1}Fx */
            hypre_CSRMatrixMatvec(mone, matF_d, rhs_local, zero, ftemp_upper);
            /* -U^{-1}L^{-1}Fx */
            if (nLU > 0)
            {
               if (isDoublePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseDcsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (hypre_double *) &one, matU_des, (hypre_double *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (hypre_double *) ftemp_data, (hypre_double *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
               else if (isSinglePrecision)
               {
                  /* U solve */
                  HYPRE_CUSPARSE_CALL(cusparseScsrsv2_solve(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
                        nLU, BLU_nnz, (float *) &one, matU_des, (float *) BLU_data, BLU_i, BLU_j,
                        matBU_info, (float *) ftemp_data, (float *) utemp_data, ilu_solve_policy, ilu_solve_buffer));
               }
            }
            /* now copy data to y_lower */
            hypre_TMemcpy(utemp_data + nLU, rhs_data, HYPRE_Real, m, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE);
            hypre_ParVectorAxpy(one, utemp, xtemp);
         }

         /* perm back */
         HYPRE_THRUST_CALL(scatter, xtemp_data, xtemp_data + n, perm, ftemp_data);

         /* done; the result is now in ftemp, update the solution */
         hypre_ParVectorAxpy(one, ftemp, u);
      }
      break;
   }

   return hypre_error_flag;
}
#endif

HYPRE_Int
hypre_ILUSolveRAPGMRESHOST(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                           HYPRE_Int *perm, HYPRE_Int nLU, hypre_ParCSRMatrix *L,
                           HYPRE_Real *D, hypre_ParCSRMatrix *U, hypre_ParCSRMatrix *mL,
                           HYPRE_Real *mD, hypre_ParCSRMatrix *mU, hypre_ParVector *ftemp,
                           hypre_ParVector *utemp, hypre_ParVector *xtemp, hypre_ParVector *ytemp,
                           HYPRE_Solver schur_solver, HYPRE_Solver schur_precond,
                           hypre_ParVector *rhs, hypre_ParVector *x, HYPRE_Int *u_end)
{
   //#pragma omp parallel
   //   printf("threads %d\n", omp_get_num_threads());

   /* data objects for communication */
   // MPI_Comm comm = hypre_ParCSRMatrixComm(A);

   /* data objects for L and U */
   hypre_CSRMatrix *L_diag      = hypre_ParCSRMatrixDiag(L);
   HYPRE_Real      *L_diag_data = hypre_CSRMatrixData(L_diag);
   HYPRE_Int       *L_diag_i    = hypre_CSRMatrixI(L_diag);
   HYPRE_Int       *L_diag_j    = hypre_CSRMatrixJ(L_diag);
   hypre_CSRMatrix *U_diag      = hypre_ParCSRMatrixDiag(U);
   HYPRE_Real      *U_diag_data = hypre_CSRMatrixData(U_diag);
   HYPRE_Int       *U_diag_i    = hypre_CSRMatrixI(U_diag);
   HYPRE_Int       *U_diag_j    = hypre_CSRMatrixJ(U_diag);
   hypre_CSRMatrix *mL_diag      = hypre_ParCSRMatrixDiag(mL);
   HYPRE_Real      *mL_diag_data = hypre_CSRMatrixData(mL_diag);
   HYPRE_Int       *mL_diag_i    = hypre_CSRMatrixI(mL_diag);
   HYPRE_Int       *mL_diag_j    = hypre_CSRMatrixJ(mL_diag);
   hypre_CSRMatrix *mU_diag      = hypre_ParCSRMatrixDiag(mU);
   HYPRE_Real      *mU_diag_data = hypre_CSRMatrixData(mU_diag);
   HYPRE_Int       *mU_diag_i    = hypre_CSRMatrixI(mU_diag);
   HYPRE_Int       *mU_diag_j    = hypre_CSRMatrixJ(mU_diag);

   hypre_Vector *utemp_local = hypre_ParVectorLocalVector(utemp);
   HYPRE_Real   *utemp_data  = hypre_VectorData(utemp_local);
   hypre_Vector *ftemp_local = hypre_ParVectorLocalVector(ftemp);
   HYPRE_Real   *ftemp_data  = hypre_VectorData(ftemp_local);
   hypre_Vector *xtemp_local = NULL;
   HYPRE_Real   *xtemp_data  = NULL;
   hypre_Vector *ytemp_local = NULL;
   HYPRE_Real   *ytemp_data  = NULL;
   if (xtemp)
   {
      /* xtemp might be NULL when we have no Schur complement */
      xtemp_local = hypre_ParVectorLocalVector(xtemp);
      xtemp_data  = hypre_VectorData(xtemp_local);
      ytemp_local = hypre_ParVectorLocalVector(ytemp);
      ytemp_data  = hypre_VectorData(ytemp_local);
   }

   HYPRE_Real alpha;
   HYPRE_Real beta;
   HYPRE_Int i, j, k1, k2, col;

   /* problem size */
   HYPRE_Int n = hypre_CSRMatrixNumRows(L_diag);
   HYPRE_Int m = n - nLU;

   /* other data objects for computation */
   //hypre_Vector *f_local;
   //HYPRE_Real *f_data;
   hypre_Vector *rhs_local;
   HYPRE_Real   *rhs_data;
   hypre_Vector *x_local;
   HYPRE_Real   *x_data;

   /* begin */
   beta = 1.0;
   alpha = -1.0;

   if (m > 0)
   {
      /* set up vectors for the solve */
      rhs_local = hypre_ParVectorLocalVector(rhs);
      rhs_data  = hypre_VectorData(rhs_local);
      x_local   = hypre_ParVectorLocalVector(x);
      x_data    = hypre_VectorData(x_local);
   }

   /* only supports RAP with partially factorized W and Z */
   /* compute residual */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);

   /* A-smoothing: f_temp = [UA \ LA \ (f_temp[perm])] */
   /* permuted L solve */
   for (i = 0 ; i < n ; i ++)
   {
      utemp_data[i] = ftemp_data[perm[i]];
      k1 = L_diag_i[i] ; k2 = L_diag_i[i + 1];
      for (j = k1 ; j < k2 ; j ++)
      {
         col = L_diag_j[j];
         utemp_data[i] -= L_diag_data[j] * utemp_data[col];
      }
   }

   if (!xtemp)
   {
      /* in this case, we don't have a Schur complement */
      /* U solve */
      for (i = n - 1 ; i >= 0 ; i --)
      {
         ftemp_data[perm[i]] = utemp_data[i];
         k1 = U_diag_i[i] ; k2 = U_diag_i[i + 1];
         for (j = k1 ; j < k2 ; j ++)
         {
            col = U_diag_j[j];
            ftemp_data[perm[i]] -= U_diag_data[j] * ftemp_data[perm[col]];
         }
         ftemp_data[perm[i]] *= D[i];
      }

      hypre_ParVectorAxpy(beta, ftemp, u);

      return hypre_error_flag;
   }

   /* U solve */
   for (i = n - 1 ; i >= 0 ; i --)
   {
      xtemp_data[perm[i]] = utemp_data[i];
      k1 = U_diag_i[i] ; k2 = U_diag_i[i + 1];
      for (j = k1 ; j < k2 ; j ++)
      {
         col = U_diag_j[j];
         xtemp_data[perm[i]] -= U_diag_data[j] * xtemp_data[perm[col]];
      }
      xtemp_data[perm[i]] *= D[i];
   }

   /* coarse-grid correction */
   /* now f_temp is the result of the A-smoothing;
    * rhs = R*(b - Ax)
    */
   // utemp = (ftemp - A*xtemp)
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, xtemp, beta, ftemp, utemp);

   // R = [-L21 L\inv, I]
   if (m > 0)
   {
      /* first is the L solve */
      for (i = 0 ; i < nLU ; i ++)
      {
         ytemp_data[i] = utemp_data[perm[i]];
         k1 = mL_diag_i[i] ; k2 = mL_diag_i[i + 1];
         for (j = k1 ; j < k2 ; j ++)
         {
            col = mL_diag_j[j];
            ytemp_data[i] -= mL_diag_data[j] * ytemp_data[col];
         }
      }

      /* apply -W * ytemp on this, and take care of the I part */
      for (i = nLU ; i < n ; i ++)
      {
         rhs_data[i - nLU] = utemp_data[perm[i]];
         k1 = mL_diag_i[i] ; k2 = u_end[i];
         for (j = k1 ; j < k2 ; j ++)
         {
            col = mL_diag_j[j];
            rhs_data[i - nLU] -= mL_diag_data[j] * ytemp_data[col];
         }
      }
   }

   /* now the rhs is ready */
   hypre_SeqVectorSetConstantValues(x_local, 0.0);
   HYPRE_GMRESSolve(schur_solver, (HYPRE_Matrix)schur_precond, (HYPRE_Vector)rhs, (HYPRE_Vector)x);

   if (m > 0)
   {
      /*
      for(i = 0 ; i < m ; i ++)
      {
         x_data[i] = rhs_data[i];
         k1 = u_end[i+nLU] ; k2 = mL_diag_i[i+nLU+1];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mL_diag_j[j];
            x_data[i] -= mL_diag_data[j] * x_data[col-nLU];
         }
      }
      for(i = m-1 ; i >= 0 ; i --)
      {
         rhs_data[i] = x_data[i];
         k1 = mU_diag_i[i+nLU] ; k2 = mU_diag_i[i+1+nLU];
         for(j = k1 ; j < k2 ; j ++)
         {
            col = mU_diag_j[j];
            rhs_data[i] -= mU_diag_data[j] * rhs_data[col-nLU];
         }
         rhs_data[i] *= mD[i];
      }
      */

      /* after the solve, update x = x + Pv,
       * that is, xtemp = xtemp + P*x
       */
      /* first compute P*x
       * P = [ -U\inv U_12 ]
       *     [  I          ]
       */
      /* matvec */
      for (i = 0 ; i < nLU ; i ++)
      {
         ytemp_data[i] = 0.0;
         k1 = u_end[i] ; k2 = mU_diag_i[i + 1];
         for (j = k1 ; j < k2 ; j ++)
         {
            col = mU_diag_j[j];
            ytemp_data[i] -= mU_diag_data[j] * x_data[col - nLU];
         }
      }
      /* U solve */
      for (i = nLU - 1 ; i >= 0 ; i --)
      {
         ftemp_data[perm[i]] = ytemp_data[i];
         k1 = mU_diag_i[i] ; k2 = u_end[i];
         for (j = k1 ; j < k2 ; j ++)
         {
            col = mU_diag_j[j];
            ftemp_data[perm[i]] -= mU_diag_data[j] * ftemp_data[perm[col]];
         }
         ftemp_data[perm[i]] *= mD[i];
      }

      /* update with I */
      for (i = nLU ; i < n ; i ++)
      {
         ftemp_data[perm[i]] = x_data[i - nLU];
      }
      hypre_ParVectorAxpy(beta, ftemp, u);
   }

   hypre_ParVectorAxpy(beta, xtemp, u);

   return hypre_error_flag;
}

/* solve functions for NSH */

/*--------------------------------------------------------------------
 * hypre_NSHSolve
 *--------------------------------------------------------------------*/
HYPRE_Int
hypre_NSHSolve( void *nsh_vdata, hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u )
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   // HYPRE_Int i;

   hypre_ParNSHData *nsh_data = (hypre_ParNSHData*) nsh_vdata;

   /* get matrices */
   hypre_ParCSRMatrix *matA = hypre_ParNSHDataMatA(nsh_data);
   hypre_ParCSRMatrix *matM = hypre_ParNSHDataMatM(nsh_data);

   HYPRE_Int iter, num_procs, my_id;

   hypre_ParVector *F_array = hypre_ParNSHDataF(nsh_data);
   hypre_ParVector *U_array = hypre_ParNSHDataU(nsh_data);
   /* get settings */
   HYPRE_Real tol = hypre_ParNSHDataTol(nsh_data);
   HYPRE_Int logging = hypre_ParNSHDataLogging(nsh_data);
   HYPRE_Int print_level = hypre_ParNSHDataPrintLevel(nsh_data);
   HYPRE_Int max_iter = hypre_ParNSHDataMaxIter(nsh_data);
   HYPRE_Real *norms = hypre_ParNSHDataRelResNorms(nsh_data);
   hypre_ParVector *Ftemp = hypre_ParNSHDataFTemp(nsh_data);
   hypre_ParVector *Utemp = hypre_ParNSHDataUTemp(nsh_data);
   hypre_ParVector *residual;

   HYPRE_Real alpha = -1.0;
   HYPRE_Real beta = 1.0;
   HYPRE_Real conv_factor = 0.0;
   HYPRE_Real resnorm = 1.0;
   HYPRE_Real init_resnorm = 0.0;
   HYPRE_Real rel_resnorm;
   HYPRE_Real rhs_norm = 0.0;
   HYPRE_Real old_resnorm;
   HYPRE_Real ieee_check = 0.;
   HYPRE_Real operat_cmplxty = hypre_ParNSHDataOperatorComplexity(nsh_data);

   HYPRE_Int Solve_err_flag;

   /* problem size */
   // HYPRE_Int n = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* begin */
   if (logging > 1)
   {
      residual = hypre_ParNSHDataResidual(nsh_data);
   }

   hypre_ParNSHDataNumIterations(nsh_data) = 0;
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /*-----------------------------------------------------------------------
    * Write the solver parameters
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1)
   {
      hypre_NSHWriteSolverParams(nsh_data);
   }

   /*-----------------------------------------------------------------------
    * Initialize the solver error flag
    *-----------------------------------------------------------------------*/
   Solve_err_flag = 0;

   /*-----------------------------------------------------------------------
    * Write some initial info
    *-----------------------------------------------------------------------*/
   if (my_id == 0 && print_level > 1 && tol > 0.)
   {
      hypre_printf("\n\n Newton-Schulz-Hotelling SOLVER SOLUTION INFO:\n");
   }

   /*-----------------------------------------------------------------------
    * Compute initial residual and print
    *-----------------------------------------------------------------------*/
   if (print_level > 1 || logging > 1 || tol > 0.)
   {
      if ( logging > 1 )
      {
         hypre_ParVectorCopy(f, residual );
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, residual );
         }
         resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
      }
      else
      {
         hypre_ParVectorCopy(f, Ftemp);
         if (tol > 0.0)
         {
            hypre_ParCSRMatrixMatvec(alpha, A, u, beta, Ftemp);
         }
         resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
      }

      /* Since it does not diminish performance, attempt to return an error flag
         and notify users when they supply bad input. */
      if (resnorm != 0.)
      {
         ieee_check = resnorm / resnorm; /* INF -> NaN conversion */
      }
      if (ieee_check != ieee_check)
      {
         /* ...INFs or NaNs in input can make ieee_check a NaN. This test
            for ieee_check self-equality works on all IEEE-compliant compilers/
            machines, c.f. page 8 of "Lecture Notes on the Status of IEEE 754"
            by W. Kahan, May 31, 1996. Currently (July 2002) this paper may be
            found at http://HTTP.CS.Berkeley.EDU/~wkahan/ieee754status/IEEE754.PDF */
         if (print_level > 0)
         {
            hypre_printf("\n\nERROR detected by Hypre ... BEGIN\n");
            hypre_printf("ERROR -- hypre_NSHSolve: INFs and/or NaNs detected in input.\n");
            hypre_printf("User probably placed non-numerics in supplied A, x_0, or b.\n");
            hypre_printf("ERROR detected by Hypre ... END\n\n\n");
         }
         hypre_error(HYPRE_ERROR_GENERIC);
         return hypre_error_flag;
      }

      init_resnorm = resnorm;
      rhs_norm = sqrt(hypre_ParVectorInnerProd(f, f));
      if (rhs_norm > HYPRE_REAL_EPSILON)
      {
         rel_resnorm = init_resnorm / rhs_norm;
      }
      else
      {
         /* rhs is zero, return a zero solution */
         hypre_ParVectorSetConstantValues(U_array, 0.0);
         if (logging > 0)
         {
            rel_resnorm = 0.0;
            hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;
         }
         return hypre_error_flag;
      }
   }
   else
   {
      rel_resnorm = 1.;
   }

   if (my_id == 0 && print_level > 1)
   {
      hypre_printf("                                            relative\n");
      hypre_printf("               residual        factor       residual\n");
      hypre_printf("               --------        ------       --------\n");
      hypre_printf("    Initial    %e                 %e\n", init_resnorm, rel_resnorm);
   }

   matA = A;
   U_array = u;
   F_array = f;

   /************** Main Solver Loop - always do 1 iteration ************/
   iter = 0;

   while ((rel_resnorm >= tol || iter < 1) && iter < max_iter)
   {
      /* Do one solve on e = Mr */
      hypre_NSHSolveInverse(matA, f, u, matM, Utemp, Ftemp);

      /*---------------------------------------------------------------
       * Compute residual and residual norm
       *---------------------------------------------------------------*/
      if (print_level > 1 || logging > 1 || tol > 0.)
      {
         old_resnorm = resnorm;
         if ( logging > 1 )
         {
            hypre_ParVectorCopy(F_array, residual);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, residual );
            resnorm = sqrt(hypre_ParVectorInnerProd( residual, residual ));
         }
         else
         {
            hypre_ParVectorCopy(F_array, Ftemp);
            hypre_ParCSRMatrixMatvec(alpha, matA, U_array, beta, Ftemp);
            resnorm = sqrt(hypre_ParVectorInnerProd(Ftemp, Ftemp));
         }

         if (old_resnorm)
         {
            conv_factor = resnorm / old_resnorm;
         }
         else
         {
            conv_factor = resnorm;
         }
         if (rhs_norm > HYPRE_REAL_EPSILON)
         {
            rel_resnorm = resnorm / rhs_norm;
         }
         else
         {
            rel_resnorm = resnorm;
         }
         norms[iter] = rel_resnorm;
      }

      ++iter;
      hypre_ParNSHDataNumIterations(nsh_data) = iter;
      hypre_ParNSHDataFinalRelResidualNorm(nsh_data) = rel_resnorm;

      if (my_id == 0 && print_level > 1)
      {
         hypre_printf("    NSHSolve %2d   %e    %f     %e \n",
                      iter, resnorm, conv_factor, rel_resnorm);
      }
   }

   /* check convergence within max_iter */
   if (iter == max_iter && tol > 0.)
   {
      Solve_err_flag = 1;
      hypre_error(HYPRE_ERROR_CONV);
   }

   /*-----------------------------------------------------------------------
    * Print closing statistics
    * Add operator and grid complexity stats
    *-----------------------------------------------------------------------*/
   if (iter > 0 && init_resnorm)
   {
      conv_factor = pow((resnorm / init_resnorm), (1.0 / (HYPRE_Real) iter));
   }
   else
   {
      conv_factor = 1.;
   }

   if (print_level > 1)
   {
      /*** compute operator and grid complexity (fill factor) here ?? ***/
      if (my_id == 0)
      {
         if (Solve_err_flag == 1)
         {
            hypre_printf("\n\n==============================================");
            hypre_printf("\n NOTE: Convergence tolerance was not achieved\n");
            hypre_printf("      within the allowed %d iterations\n", max_iter);
            hypre_printf("==============================================");
         }
         hypre_printf("\n\n Average Convergence Factor = %f \n", conv_factor);
         hypre_printf("                operator = %f\n", operat_cmplxty);
      }
   }

   return hypre_error_flag;
}

/* NSH solve:
 * simply a matvec of the approximate inverse with the residual.
 * A: original matrix
 * f: rhs
 * u: solution
 * M: approximate inverse
 * ftemp, utemp: work vectors
 */
HYPRE_Int
hypre_NSHSolveInverse(hypre_ParCSRMatrix *A, hypre_ParVector *f, hypre_ParVector *u,
                      hypre_ParCSRMatrix *M, hypre_ParVector *ftemp, hypre_ParVector *utemp)
{
   HYPRE_Real alpha;
   HYPRE_Real beta;

   /* begin */
   alpha = -1.0;
   beta = 1.0;
   /* r = f - Au */
   hypre_ParCSRMatrixMatvecOutOfPlace(alpha, A, u, beta, f, ftemp);
   /* e = Mr */
   hypre_ParCSRMatrixMatvec(1.0, M, ftemp, 0.0, utemp);
   /* u = u + e */
   hypre_ParVectorAxpy(beta, utemp, u);
   return hypre_error_flag;
}
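For reference, hypre_NSHSolveInverse above is one preconditioned Richardson step, u <- u + M(f - Au), with M an approximate inverse of A. The following standalone sketch replays that update on a toy 2x2 dense system; the matrices and the Jacobi-like choice of M are invented for illustration and do not come from hypre.

#include <stdio.h>

/* y = A*v for a 2x2 matrix */
static void matvec2(const double A[2][2], const double v[2], double y[2])
{
    for (int i = 0; i < 2; i++)
        y[i] = A[i][0] * v[0] + A[i][1] * v[1];
}

int main(void)
{
    double A[2][2] = {{4.0, 1.0}, {1.0, 3.0}};        /* system matrix (made up) */
    double M[2][2] = {{0.25, 0.0}, {0.0, 1.0 / 3.0}}; /* crude approximate inverse */
    double f[2] = {1.0, 2.0};
    double u[2] = {0.0, 0.0};

    for (int it = 0; it < 30; it++) {
        double Au[2], r[2], e[2];
        matvec2(A, u, Au);
        r[0] = f[0] - Au[0];   /* r = f - Au  (hypre: MatvecOutOfPlace) */
        r[1] = f[1] - Au[1];
        matvec2(M, r, e);      /* e = M r     (hypre: Matvec with M)   */
        u[0] += e[0];          /* u = u + e   (hypre: ParVectorAxpy)   */
        u[1] += e[1];
    }
    /* converges toward A^{-1} f = (1/11, 7/11) */
    printf("u = (%.6f, %.6f)\n", u[0], u[1]);
    return 0;
}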
Example_atomic_restrict.1.c
/*
 * @@name:       atomic_restrict.1c
 * @@type:       C
 * @@compilable: maybe
 * @@linkable:   no
 * @@expect:     failure
 * @@version:    omp_3.1
 */
void atomic_wrong ()
{
   union {int n; float x;} u;

   #pragma omp parallel
   {
      #pragma omp atomic update
      u.n++;

      #pragma omp atomic update
      u.x += 1.0;

      /* Incorrect because the atomic constructs reference the same location
         through incompatible types */
   }
}
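For contrast, a conforming variant of the example above: the restriction is satisfied when every atomic construct in the region references the storage location through the same type. This counterpart is a sketch written for this document and is not part of the OpenMP Examples suite.

void atomic_ok ()
{
   union {int n; float x;} u;
   u.n = 0;

   #pragma omp parallel
   {
      #pragma omp atomic update
      u.n++;

      #pragma omp atomic update
      u.n += 1;

      /* Conforming: both atomic constructs reference the same
         location through the same (int) type */
   }
}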
hello_openmp.c
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include <sys/utsname.h>

/* gtmp_barrier() and gtmp_finalize() are provided by an external barrier
 * library; the header name and the gtmp_init() call below are assumptions
 * about that library's interface, since the original file used the functions
 * without declaring or initializing anything. */
#include "gtmp.h"

int main(int argc, char **argv)
{
    int num_threads;

    // Serial code
    printf("This is the serial section\n");

    if (argc < 2) {
        fprintf(stderr, "Usage: ./hello_world [NUM_THREADS]\n");
        exit(1);
    }
    num_threads = strtol(argv[1], NULL, 10);

    omp_set_dynamic(0);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    omp_set_num_threads(num_threads);

    gtmp_init(num_threads); /* assumed initialization; see note above */

    #pragma omp parallel
    {
        int thread_num = omp_get_thread_num();
        struct utsname ugnm;

        num_threads = omp_get_num_threads();
        uname(&ugnm);

        printf("Hello World from thread %d of %d, running on %s.\n",
               thread_num, num_threads, ugnm.nodename);

        gtmp_barrier();

        printf("Hello World from thread %d of %d, running on %s.\n",
               thread_num, num_threads, ugnm.nodename);
    } // implied barrier

    gtmp_finalize();

    // Resume serial code
    printf("Back in the serial section again\n");
    return 0;
}
SuperRayGenerator.h
/*
 * Copyright(c) 2016, Youngsun Kwon, Donghyuk Kim, and Sung-eui Yoon, KAIST
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *     * Redistributions of source code must retain the above copyright notice, this
 *       list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright notice,
 *       this list of conditions and the following disclaimer in the documentation
 *       and/or other materials provided with the distribution.
 *
 *     * Neither the name of SuperRay nor the names of its
 *       contributors may be used to endorse or promote products derived from
 *       this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef QUADMAP_SUPERRAY_SUPERRAY_GENERATOR_H
#define QUADMAP_SUPERRAY_SUPERRAY_GENERATOR_H

#include <quadmap/quadmap_types.h>
#include <quadmap/QuadTreeKey.h>
#include <quadmap/Pointcloud.h>
#include <quadmap_superray/SuperRayCloud.h>

#ifdef _OPENMP
#include <omp.h>
#pragma omp declare reduction (merge : std::vector<quadmap::SuperRay> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
#endif

namespace quadmap{
    class SuperRayGenerator{
    public:
        SuperRayGenerator(const double _resolution, const unsigned int _tree_max_val, const int _threshold = 0);
        ~SuperRayGenerator() {};

        void GenerateSuperRay(const Pointcloud& _pc, const point2d& _origin, SuperRayCloud& _srcloud);

    protected:
        struct PixelInfo;
        struct Axis2D;

        point2d     originW;    // origin point in world space
        QuadTreeKey originKey;  // origin key

        // constants for generating super rays
        double       RESOLUTION;        // resolution
        double       RESOLUTION_FACTOR; // 1.0 / resolution
        unsigned int TREE_MAX_VAL;      // offset
        unsigned int THRESHOLD;         // threshold limiting super-ray generation for each pixel

        // functions for generating super rays
        void GenerateSuperRay(const point2d_collection& _pointlist, std::vector<SuperRay>& _srcloud);
        void GenerateSuperRay2D(const point2d_collection& _pointlist, Axis2D& _axis, PixelInfo& _pixelinfo, std::vector<SuperRay>& _srcloud);

        // function for generating the mapping line in 2-D
        double GenerateMappingLine(PixelInfo& _pixelinfo, const unsigned int& _axisX, const unsigned int& _axisY, std::vector<double>& _mappingPlane);

        // utility functions
        typedef unordered_ns::unordered_map<QuadTreeKey, std::vector<point2d>, QuadTreeKey::KeyHash> Voxelized_Pointclouds;
        void ComputeAxis(const point2d& _min, const point2d& _max, Axis2D& _axis);

        // re-implementation of the key / coordinate conversion functions
        inline QuadTreeKey coordToKey(const point2d& coord) const {
            return QuadTreeKey(coordToKey(coord(0)), coordToKey(coord(1)));
        }
        inline key_type coordToKey(double coordinate) const {
            return ((int)floor(RESOLUTION_FACTOR * coordinate)) + TREE_MAX_VAL;
        }

        // structures that represent the traversal information
        struct PixelInfo{
            PixelInfo(void) {};

            // pixel info
            point2d     minW;       // min position of the pixel
            point2d     maxW;       // max position of the pixel
            QuadTreeKey pixelKey;   // key of the pixel
        };

        struct Axis2D{
            Axis2D(void) : axisU(0), axisV(1) {};

            unsigned int axisU;     // nearest axis
            unsigned int axisV;     // farthest axis
        };
    };
}

#endif
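SuperRayGenerator.h only defines the coordinate-to-key direction of the conversion. For reading convenience, the usual inverse under this key scheme recovers the center of the addressed cell; the helper below is a reconstruction from coordToKey() above (following the common OctoMap-style convention) and is not part of the class.

/* Hypothetical inverse of coordToKey(): returns the center coordinate of
 * the cell addressed by key, given the same RESOLUTION and TREE_MAX_VAL. */
inline double keyToCoord(quadmap::key_type key, double resolution,
                         unsigned int tree_max_val)
{
    return ((double)((int)key - (int)tree_max_val) + 0.5) * resolution;
}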
util.c
/*-
 * Copyright (c) 2012-2017 Ilya Kaliman
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <ctype.h>

#include "private.h"
#include "util.h"

int
efp_skip_frag_pair(const struct efp *efp, size_t fr_i_idx, size_t fr_j_idx)
{
	size_t idx = fr_i_idx * efp->n_frag + fr_j_idx;

	if (efp->skiplist[idx])
		return 1;
	if (!efp->opts.enable_cutoff)
		return 0;

	const struct frag *fr_i = efp->frags + fr_i_idx;
	const struct frag *fr_j = efp->frags + fr_j_idx;
	double cutoff2 = efp->opts.swf_cutoff * efp->opts.swf_cutoff;
	vec_t dr = vec_sub(CVEC(fr_j->x), CVEC(fr_i->x));

	if (efp->opts.enable_pbc) {
		// orthogonal box
		if (efp->box.a == 90.0 && efp->box.b == 90.0 &&
		    efp->box.c == 90.0) {
			vec_t cell = {efp->box.x * round(dr.x / efp->box.x),
				      efp->box.y * round(dr.y / efp->box.y),
				      efp->box.z * round(dr.z / efp->box.z)};
			dr = vec_sub(&dr, &cell);
		} else {
			cart_to_frac(efp->box, &dr);
			vec_t cell = {round(dr.x), round(dr.y), round(dr.z)};
			dr = vec_sub(&dr, &cell);
			frac_to_cart(efp->box, &dr);
		}
	}
	return vec_len_2(&dr) > cutoff2;
}

struct swf
efp_make_swf(const struct efp *efp, const struct frag *fr_i,
    const struct frag *fr_j, int short_range_cutoff)
{
	struct swf swf;

	memset(&swf, 0, sizeof(swf));
	swf.swf = 1.0;
	swf.dr = vec_sub(CVEC(fr_j->x), CVEC(fr_i->x));

	if (!efp->opts.enable_cutoff)
		return swf;

	if (efp->opts.enable_pbc) {
		if (efp->box.a == 90.0 && efp->box.b == 90.0 &&
		    efp->box.c == 90.0) {
			swf.cell.x = efp->box.x * round(swf.dr.x / efp->box.x);
			swf.cell.y = efp->box.y * round(swf.dr.y / efp->box.y);
			swf.cell.z = efp->box.z * round(swf.dr.z / efp->box.z);

			swf.dr.x -= swf.cell.x;
			swf.dr.y -= swf.cell.y;
			swf.dr.z -= swf.cell.z;
		} else {
			cart_to_frac(efp->box, &swf.dr);
			swf.cell.x = round(swf.dr.x);
			swf.cell.y = round(swf.dr.y);
			swf.cell.z = round(swf.dr.z);
			swf.dr = vec_sub(&swf.dr, &swf.cell);
			frac_to_cart(efp->box, &swf.cell);
			frac_to_cart(efp->box, &swf.dr);
		}
	}

	double r = vec_len(&swf.dr);

	// distinguish the cutoff for exchange repulsion from the other terms
	double dswf = 0.0;
	if (short_range_cutoff == 1) {
		swf.swf = efp_get_swf(r, efp->opts.xr_cutoff);
		dswf = efp_get_dswf(r, efp->opts.xr_cutoff);
	} else {
		swf.swf = efp_get_swf(r, efp->opts.swf_cutoff);
		dswf = efp_get_dswf(r, efp->opts.swf_cutoff);
	}

	swf.dswf.x = -dswf * swf.dr.x;
	swf.dswf.y = -dswf * swf.dr.y;
	swf.dswf.z = -dswf * swf.dr.z;

	return swf;
}

int
efp_check_rotation_matrix(const mat_t *rotmat)
{
	vec_t ax = { rotmat->xx, rotmat->yx, rotmat->zx };
	vec_t ay = { rotmat->xy, rotmat->yy, rotmat->zy };
	vec_t az = { rotmat->xz, rotmat->yz, rotmat->zz };

	if (!eq(vec_len(&ax), 1.0) ||
	    !eq(vec_len(&ay), 1.0) ||
	    !eq(vec_len(&az), 1.0))
		return 0;

	if (!eq(vec_dot(&ax, &ay), 0.0))
		return 0;

	vec_t cross = vec_cross(&ax, &ay);

	if (!eq(cross.x, az.x) ||
	    !eq(cross.y, az.y) ||
	    !eq(cross.z, az.z))
		return 0;

	return 1;
}

void
efp_points_to_matrix(const double *pts, mat_t *rotmat)
{
	vec_t p1 = { pts[0], pts[1], pts[2] };
	vec_t p2 = { pts[3], pts[4], pts[5] };
	vec_t p3 = { pts[6], pts[7], pts[8] };

	vec_t r12 = vec_sub(&p2, &p1);
	vec_t r13 = vec_sub(&p3, &p1);

	vec_normalize(&r12);
	vec_normalize(&r13);

	double dot = vec_dot(&r12, &r13);

	r13.x -= dot * r12.x;
	r13.y -= dot * r12.y;
	r13.z -= dot * r12.z;

	vec_t cross = vec_cross(&r12, &r13);

	vec_normalize(&r13);
	vec_normalize(&cross);

	rotmat->xx = r12.x;
	rotmat->yx = r12.y;
	rotmat->zx = r12.z;
	rotmat->xy = r13.x;
	rotmat->yy = r13.y;
	rotmat->zy = r13.z;
	rotmat->xz = cross.x;
	rotmat->yz = cross.y;
	rotmat->zz = cross.z;
}

const struct frag *
efp_find_lib(struct efp *efp, const char *name)
{
	for (size_t i = 0; i < efp->n_lib; i++)
		if (efp_strcasecmp(efp->lib[i]->name, name) == 0)
			return efp->lib[i];
	return NULL;
}

void
efp_add_stress(const vec_t *dr, const vec_t *force, mat_t *stress)
{
#ifdef _OPENMP
#pragma omp critical
#endif
	{
		stress->xx += dr->x * force->x;
		stress->xy += dr->x * force->y;
		stress->xz += dr->x * force->z;
		stress->yx += dr->y * force->x;
		stress->yy += dr->y * force->y;
		stress->yz += dr->y * force->z;
		stress->zx += dr->z * force->x;
		stress->zy += dr->z * force->y;
		stress->zz += dr->z * force->z;
	}
}

void
efp_add_force(six_t *grad, const vec_t *com, const vec_t *pt,
    const vec_t *force, const vec_t *add)
{
	vec_t dr = vec_sub(CVEC(pt->x), com);
	vec_t torque = vec_cross(&dr, force);

	if (add) {
		torque.x += add->x;
		torque.y += add->y;
		torque.z += add->z;
	}

	six_atomic_add_xyz(grad, force);
	six_atomic_add_abc(grad, &torque);
}

void
efp_sub_force(six_t *grad, const vec_t *com, const vec_t *pt,
    const vec_t *force, const vec_t *add)
{
	vec_t dr = vec_sub(CVEC(pt->x), com);
	vec_t torque = vec_cross(&dr, force);

	if (add) {
		torque.x += add->x;
		torque.y += add->y;
		torque.z += add->z;
	}

	six_atomic_sub_xyz(grad, force);
	six_atomic_sub_abc(grad, &torque);
}

void
efp_move_pt(const vec_t *com, const mat_t *rotmat, const vec_t *pos_int,
    vec_t *out)
{
	*out = mat_vec(rotmat, pos_int);
	out->x += com->x;
	out->y += com->y;
	out->z += com->z;
}

void
efp_rotate_t2(const mat_t *rotmat, const double *in, double *out)
{
	for (size_t i = 0; i < 3 * 3; i++)
		out[i] = 0.0;

	for (size_t a1 = 0; a1 < 3; a1++)
	for (size_t b1 = 0; b1 < 3; b1++)
	for (size_t a2 = 0; a2 < 3; a2++)
	for (size_t b2 = 0; b2 < 3; b2++)
		out[a2 * 3 + b2] += in[a1 * 3 + b1] *
		    mat_get(rotmat, a2, a1) * mat_get(rotmat, b2, b1);
}

void
efp_rotate_t3(const mat_t *rotmat, const double *in, double *out)
{
	double a, b, c;

	for (size_t i = 0; i < 3 * 3 * 3; i++)
		out[i] = 0.0;

	for (size_t a1 = 0; a1 < 3; a1++)
	for (size_t b1 = 0; b1 < 3; b1++)
	for (size_t c1 = 0; c1 < 3; c1++)
	for (size_t a2 = 0; a2 < 3; a2++)
	for (size_t b2 = 0; b2 < 3; b2++)
	for (size_t c2 = 0; c2 < 3; c2++) {
		a = mat_get(rotmat, a2, a1);
		b = mat_get(rotmat, b2, b1);
		c = mat_get(rotmat, c2, c1);
		out[a2 * 9 + b2 * 3 + c2] +=
		    in[a1 * 9 + b1 * 3 + c1] * a * b * c;
	}
}

mat_t
rotmat_2frags(const mat_t *rotmat1, const mat_t *rotmat2)
{
	// rotmat2 * rotmat1^T
	mat_t rotmat1_t = mat_transpose(rotmat1);
	return mat_mat(rotmat2, &rotmat1_t);
}

int
efp_strcasecmp(const char *s1, const char *s2)
{
	while (tolower(*s1) == tolower(*s2++))
		if (*s1++ == '\0')
			return 0;
	return tolower(*s1) - tolower(*--s2);
}

int
efp_strncasecmp(const char *s1, const char *s2, size_t n)
{
	if (n != 0) {
		do {
			if (tolower(*s1) != tolower(*s2++))
				return tolower(*s1) - tolower(*--s2);
			if (*s1++ == '\0')
				break;
		} while (--n != 0);
	}
	return 0;
}

void
find_plane(const vec_t pt1, const vec_t pt2, const vec_t pt3, vec_t *normal,
    double *d)
{
	// determines the plane through three points: ax + by + cz + d = 0,
	// where (a, b, c) is the normal vector; d is returned through a
	// pointer so the caller receives the plane offset
	vec_t vec21 = vec_sub(&pt2, &pt1);
	vec_t vec31 = vec_sub(&pt3, &pt1);

	*normal = vec_cross(&vec21, &vec31);
	*d = -vec_dot(normal, &pt1);
}

double
max_cutoff(const six_t box)
{
	vec_t point000 = {0.0, 0.0, 0.0};
	vec_t point100 = {1.0, 0.0, 0.0};
	vec_t point010 = {0.0, 1.0, 0.0};
	vec_t point001 = {0.0, 0.0, 1.0};
	vec_t point = {0.5, 0.5, 0.5};

	frac_to_cart(box, &point100);
	frac_to_cart(box, &point010);
	frac_to_cart(box, &point001);
	frac_to_cart(box, &point);

	vec_t normal;
	double d;
	double dist;
	double min_dist;

	// plane xy: points 000, 100, 010
	find_plane(point100, point000, point010, &normal, &d);
	dist = (vec_dot(&normal, &point) + d) / vec_len(&normal);
	dist = fabs(dist);
	min_dist = dist;

	// plane xz: points 000, 100, 001
	find_plane(point100, point000, point001, &normal, &d);
	dist = (vec_dot(&normal, &point) + d) / vec_len(&normal);
	dist = fabs(dist);
	if (min_dist > dist)
		min_dist = dist;

	// plane yz: points 000, 010, 001
	find_plane(point010, point000, point001, &normal, &d);
	dist = (vec_dot(&normal, &point) + d) / vec_len(&normal);
	dist = fabs(dist);
	if (min_dist > dist)
		min_dist = dist;

	return min_dist;
}
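The orthogonal-box branches in efp_skip_frag_pair() and efp_make_swf() apply the minimum-image convention componentwise: dr <- dr - L * round(dr / L), which maps each displacement component into [-L/2, L/2]. A self-contained sketch with made-up box lengths, independent of libefp's structs:

#include <math.h>
#include <stdio.h>

/* minimum-image displacement along one axis of an orthogonal box */
static double min_image(double dr, double box_len)
{
	return dr - box_len * round(dr / box_len);
}

int main(void)
{
	/* in a box of length 10, a separation of 9 really means -1 */
	printf("%g\n", min_image(9.0, 10.0));   /* -1  */
	printf("%g\n", min_image(-7.5, 10.0));  /* 2.5 */
	return 0;
}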
GB_unaryop__lnot_int16_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int16_bool
// op(A') function:  GB_tran__lnot_int16_bool

// C type:   int16_t
// A type:   bool
// cast:     int16_t cij = (int16_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    int16_t z = (int16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_int16_bool
(
    int16_t *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_int16_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
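For readability, here is a hand expansion of what the apply loop in GB_unop__lnot_int16_bool compiles down to once GB_GETA, GB_CASTING and GB_OP are substituted; this expansion is illustrative only and is not part of the generated source.

#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
    bool aij = Ax [p] ;           /* GB_GETA: fetch the bool entry    */
    int16_t x = (int16_t) aij ;   /* GB_CASTING: typecast to int16_t  */
    Cx [p] = !(x != 0) ;          /* GB_OP: logical NOT into GB_CX(p) */
}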