source
stringlengths
3
92
c
stringlengths
26
2.25M
old_copy_if.h
#pragma once #ifdef __USE_GPU__ #include <thrust/copy.h> #include <thrust/execution_policy.h> #endif ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// enum COPY_IF_TYPE { SAVE_ORDER = 0, DONT_SAVE_ORDER = 1 }; ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline int vector_dense_copy_if(const int * __restrict__ _in_data, int *_out_data, int *_tmp_buffer, const int _size, const int _shift, const COPY_IF_TYPE _output_order = SAVE_ORDER, const int _threads_count = MAX_SX_AURORA_THREADS) { int max_buffer_size = _size / (VECTOR_LENGTH * MAX_SX_AURORA_THREADS) + 1; int output_size = 0; int shifts_array[MAX_SX_AURORA_THREADS]; #pragma omp parallel num_threads(MAX_SX_AURORA_THREADS) shared(output_size) { int tid = omp_get_thread_num(); int *private_buffer = &_tmp_buffer[VECTOR_LENGTH * max_buffer_size * tid]; int reg_ptrs[VECTOR_LENGTH]; #pragma _NEC vreg(reg_ptrs) #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { reg_ptrs[i] = 0; } // copy data to buffers #pragma omp for schedule(static, 8) for (int vec_start = 0; vec_start < _size; vec_start += VECTOR_LENGTH) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { int val = 0; if((vec_start + i) < _size) val = _in_data[vec_start + i]; if(val > 0) { int dst_buffer_idx = reg_ptrs[i] + i * max_buffer_size; private_buffer[dst_buffer_idx] = _shift + vec_start + i; reg_ptrs[i]++; } } } // calculate sizes int dump_sizes[VECTOR_LENGTH]; #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { dump_sizes[i] = reg_ptrs[i]; } int private_size = 0; #pragma _NEC vector for (int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { private_size += dump_sizes[reg_pos]; } // calculate output offsets shifts_array[tid] = private_size; #pragma omp barrier #pragma omp master { int cur_shift = 0; for(int i = 1; i < 
MAX_SX_AURORA_THREADS; i++) { shifts_array[i] += shifts_array[i - 1]; } output_size = shifts_array[MAX_SX_AURORA_THREADS - 1]; for(int i = (MAX_SX_AURORA_THREADS - 1); i >= 1; i--) { shifts_array[i] = shifts_array[i - 1]; } shifts_array[0] = 0; } #pragma omp barrier int output_offset = shifts_array[tid]; // save data to output array if(_output_order == DONT_SAVE_ORDER) { int current_pos = 0; for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for (int i = 0; i < dump_sizes[reg_pos]; i++) { int src_buffer_idx = i + reg_pos * max_buffer_size; _out_data[output_offset + current_pos + i] = private_buffer[src_buffer_idx]; } current_pos += dump_sizes[reg_pos]; } } else if(_output_order == SAVE_ORDER) { int max_work = 0; #pragma _NEC vector for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { if(reg_ptrs[reg_pos] > max_work) max_work = reg_ptrs[reg_pos]; } int min_work = dump_sizes[0]; #pragma _NEC vector for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { if(reg_ptrs[reg_pos] < min_work) min_work = reg_ptrs[reg_pos]; } // save large part int current_pos = 0; for(int work_pos = 0; work_pos < min_work; work_pos++) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for(int i = 0; i < VECTOR_LENGTH; i++) { int src_buffer_idx = work_pos + i * max_buffer_size; _out_data[output_offset + current_pos + i] = private_buffer[src_buffer_idx]; } current_pos += VECTOR_LENGTH; } // save reminder for(int work_pos = min_work; work_pos < max_work; work_pos++) { #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for(int i = 0; i < VECTOR_LENGTH; i++) { if(work_pos < reg_ptrs[i]) { int src_buffer_idx = work_pos + i * max_buffer_size; _out_data[output_offset + current_pos] = private_buffer[src_buffer_idx]; current_pos++; } } } // save reminder /*for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma 
_NEC novob #pragma _NEC vector for (int i = min_work; i < dump_sizes[reg_pos]; i++) { int src_buffer_idx = i + reg_pos * max_buffer_size; _out_data[output_offset + current_pos + i - min_work] = private_buffer[src_buffer_idx]; } current_pos += dump_sizes[reg_pos] - min_work; }*/ } } return output_size; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// inline int prefix_sum_copy_if(const int * __restrict__ _in_data, int *_out_data, int *_tmp_buffer, const int _size) { int *suma; #pragma omp parallel { const int ithread = omp_get_thread_num(); const int nthreads = omp_get_num_threads(); #pragma omp single { suma = new int[nthreads+1]; suma[0] = 0; } #pragma omp for schedule(static) for (int i = 0; i < _size; i++) { if(_in_data[i] > 0) _tmp_buffer[i] = 1; else _tmp_buffer[i] = 0; } int sum = 0; #pragma omp for schedule(static) for (int i = 0; i < _size; i++) { sum += _tmp_buffer[i]; _tmp_buffer[i] = sum; } suma[ithread+1] = sum; #pragma omp barrier int offset = 0; for(int i=0; i<(ithread+1); i++) { offset += suma[i]; } #pragma omp for schedule(static) for (int i = 0; i < _size; i++) { _tmp_buffer[i] += offset; } #pragma omp for schedule(static) for (int i = 0; i < _size; i++) { if(_in_data[i] > 0) { _out_data[_tmp_buffer[i] - 1] = i; } } } int output_size = 0; for(int i = 0; i < omp_get_max_threads(); i++) { output_size += suma[i]; } delete[] suma; return output_size; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Cond> inline int generic_dense_copy_if(Cond &&_cond, int *_out_data, int *_tmp_buffer, const int _size, const int _shift, const COPY_IF_TYPE _output_order = SAVE_ORDER, const int _threads_count = MAX_SX_AURORA_THREADS) { int max_buffer_size = _size / (VECTOR_LENGTH * MAX_SX_AURORA_THREADS) + 1; int output_size = 0; int shifts_array[MAX_SX_AURORA_THREADS]; #pragma omp parallel 
num_threads(MAX_SX_AURORA_THREADS) shared(output_size) { int tid = omp_get_thread_num(); int *private_buffer = &_tmp_buffer[VECTOR_LENGTH * max_buffer_size * tid]; int reg_ptrs[VECTOR_LENGTH]; #pragma _NEC vreg(reg_ptrs) #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { reg_ptrs[i] = 0; } // copy data to buffers #pragma omp for schedule(static, 8) for (int vec_start = 0; vec_start < _size; vec_start += VECTOR_LENGTH) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { if(_cond(vec_start + i) != -1) { int dst_buffer_idx = reg_ptrs[i] + i * max_buffer_size; private_buffer[dst_buffer_idx] = _shift + vec_start + i; reg_ptrs[i]++; } } } // calculate sizes int dump_sizes[VECTOR_LENGTH]; #pragma _NEC vector for (int i = 0; i < VECTOR_LENGTH; i++) { dump_sizes[i] = reg_ptrs[i]; } int private_size = 0; #pragma _NEC vector for (int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { private_size += dump_sizes[reg_pos]; } // calculate output offsets shifts_array[tid] = private_size; #pragma omp barrier #pragma omp master { int cur_shift = 0; for(int i = 1; i < MAX_SX_AURORA_THREADS; i++) { shifts_array[i] += shifts_array[i - 1]; } output_size = shifts_array[MAX_SX_AURORA_THREADS - 1]; for(int i = (MAX_SX_AURORA_THREADS - 1); i >= 1; i--) { shifts_array[i] = shifts_array[i - 1]; } shifts_array[0] = 0; } #pragma omp barrier int output_offset = shifts_array[tid]; // save data to output array if(_output_order == DONT_SAVE_ORDER) { int current_pos = 0; for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for (int i = 0; i < dump_sizes[reg_pos]; i++) { int src_buffer_idx = i + reg_pos * max_buffer_size; _out_data[output_offset + current_pos + i] = private_buffer[src_buffer_idx]; } current_pos += dump_sizes[reg_pos]; } } else if(_output_order == SAVE_ORDER) { int max_work = 0; #pragma _NEC vector for(int reg_pos = 
0; reg_pos < VECTOR_LENGTH; reg_pos++) { if(reg_ptrs[reg_pos] > max_work) max_work = reg_ptrs[reg_pos]; } int min_work = dump_sizes[0]; #pragma _NEC vector for(int reg_pos = 0; reg_pos < VECTOR_LENGTH; reg_pos++) { if(reg_ptrs[reg_pos] < min_work) min_work = reg_ptrs[reg_pos]; } // save large part int current_pos = 0; for(int work_pos = 0; work_pos < min_work; work_pos++) { #pragma _NEC ivdep #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for(int i = 0; i < VECTOR_LENGTH; i++) { int src_buffer_idx = work_pos + i * max_buffer_size; _out_data[output_offset + current_pos + i] = private_buffer[src_buffer_idx]; } current_pos += VECTOR_LENGTH; } // save reminder for(int work_pos = min_work; work_pos < max_work; work_pos++) { #pragma _NEC vovertake #pragma _NEC novob #pragma _NEC vector for(int i = 0; i < VECTOR_LENGTH; i++) { if(work_pos < reg_ptrs[i]) { int src_buffer_idx = work_pos + i * max_buffer_size; _out_data[output_offset + current_pos] = private_buffer[src_buffer_idx]; current_pos++; } } } } } return output_size; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include "copy_if.hpp" /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
determinantOfNcrossNMatrix.c
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>

#define SIZE 10

/*
 * Reads an n x n matrix (1 <= n <= SIZE) from stdin, echoes it, and prints
 * its determinant computed by Gaussian elimination with partial pivoting:
 * the determinant is the product of the diagonal of the resulting
 * upper-triangular matrix, with the sign flipped once per row swap.
 *
 * Returns EXIT_FAILURE on malformed input, 0 on success.
 */
int main(void){
    float mat[SIZE][SIZE], ratio, det = 1;
    int i, j, k, n;

    printf("Enter Matrix Dimension = ");
    /* BUGFIX: the original never checked scanf's result nor bounded n against
     * SIZE, so n > 10 overflowed mat on the stack. */
    if(scanf("%d", &n) != 1 || n < 1 || n > SIZE){
        fprintf(stderr, "Invalid dimension (must be 1..%d)\n", SIZE);
        return EXIT_FAILURE;
    }

    printf("\nEnter Elements: \n");
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            printf("mat[%d][%d] = ", i, j);
            if(scanf("%f", &mat[i][j]) != 1){   /* reject non-numeric input */
                fprintf(stderr, "Invalid matrix element\n");
                return EXIT_FAILURE;
            }
        }
    }

    printf("\nMatrix: \n");
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            printf("%0.2f\t", mat[i][j]);
        }
        printf("\n");
    }

    /* Forward elimination with partial pivoting.
     * BUGFIX: the original aborted with "Mathematical Error!" on any zero
     * pivot; after pivoting, a zero pivot just means the matrix is singular
     * and the determinant is 0 (no error). */
    for(i = 0; i < n; i++){
        /* pick the row with the largest |pivot| for numerical stability */
        int p = i;
        for(j = i + 1; j < n; j++){
            if(fabsf(mat[j][i]) > fabsf(mat[p][i])) p = j;
        }
        if(p != i){
            for(k = 0; k < n; k++){
                float tmp = mat[i][k];
                mat[i][k] = mat[p][k];
                mat[p][k] = tmp;
            }
            det = -det;   /* each row swap flips the determinant's sign */
        }
        if(mat[i][i] == 0.0f){
            det = 0;      /* singular matrix */
            break;
        }
        for(j = i + 1; j < n; j++){
            ratio = mat[j][i] / mat[i][i];
            /* columns < i are already zero below the diagonal */
            for(k = i; k < n; k++){
                mat[j][k] = mat[j][k] - ratio * mat[i][k];
            }
        }
    }

    if(det != 0){
        /* multiply the diagonal into det (loop variable is implicitly
         * private in an OpenMP for; the original's private(i) was redundant) */
        #pragma omp parallel for shared(mat) reduction(*:det)
        for(i = 0; i < n; i++){
            det *= mat[i][i];
        }
    }

    printf("\nDeterminant of given matrix is: %0.3f\n", det);
    return 0;
}
utils.h
#ifndef CK_A2A_UTILS #define CK_A2A_UTILS #include <alg/alg_fix_gauge.h> #include <alg/a2a/gsl_wrapper.h> #include <alg/a2a/template_wizardry.h> #include <util/spincolorflavormatrix.h> CPS_START_NAMESPACE //3x3 complex vector multiplication with different precision matrices and vectors template<typename VecFloat, typename MatFloat> void colorMatrixMultiplyVector(VecFloat* y, const MatFloat* u, const VecFloat* x){ *y = *u * *x - *(u+1) * *(x+1) + *(u+2) * *(x+2) - *(u+3) * *(x+3) + *(u+4) * *(x+4) - *(u+5) * *(x+5); *(y+1) = *u * *(x+1) + *(u+1) * *x + *(u+2) * *(x+3) + *(u+3) * *(x+2) + *(u+4) * *(x+5) + *(u+5) * *(x+4); *(y+2) = *(u+6) * *x - *(u+7) * *(x+1) + *(u+8) * *(x+2) - *(u+9) * *(x+3) + *(u+10) * *(x+4) - *(u+11) * *(x+5); *(y+3) = *(u+6) * *(x+1) + *(u+7) * *x + *(u+8) * *(x+3) + *(u+9) * *(x+2) + *(u+10) * *(x+5) + *(u+11) * *(x+4); *(y+4) = *(u+12) * *x - *(u+13) * *(x+1) + *(u+14) * *(x+2) - *(u+15) * *(x+3) + *(u+16) * *(x+4) - *(u+17) * *(x+5); *(y+5) = *(u+12) * *(x+1) + *(u+13) * *x + *(u+14) * *(x+3) + *(u+15) * *(x+2) + *(u+16) * *(x+5) + *(u+17) * *(x+4); } //M^\dagger v //0 ,1 2 ,3 4 ,5 //6 ,7 8 ,9 10,11 //12,13 14,15 16,17 //-> //0 ,-1 6 ,-7 12,-13 //2 ,-3 8 ,-9 14,-15 //4 ,-5 10,-11 16,-17 template<typename VecFloat, typename MatFloat> void colorMatrixDaggerMultiplyVector(VecFloat* y, const MatFloat* u, const VecFloat* x){ *y = *u * *x + *(u+1) * *(x+1) + *(u+6) * *(x+2) + *(u+7) * *(x+3) + *(u+12) * *(x+4) + *(u+13) * *(x+5); *(y+1) = *u * *(x+1) - *(u+1) * *x + *(u+6) * *(x+3) - *(u+7) * *(x+2) + *(u+12) * *(x+5) - *(u+13) * *(x+4); *(y+2) = *(u+2) * *x + *(u+3) * *(x+1) + *(u+8) * *(x+2) + *(u+9) * *(x+3) + *(u+14) * *(x+4) + *(u+15) * *(x+5); *(y+3) = *(u+2) * *(x+1) - *(u+3) * *x + *(u+8) * *(x+3) - *(u+9) * *(x+2) + *(u+14) * *(x+5) - *(u+15) * *(x+4); *(y+4) = *(u+4) * *x + *(u+5) * *(x+1) + *(u+10) * *(x+2) + *(u+11) * *(x+3) + *(u+16) * *(x+4) + *(u+17) * *(x+5); *(y+5) = *(u+4) * *(x+1) - *(u+5) * *x + *(u+10) * *(x+3) - *(u+11) * 
*(x+2) + *(u+16) * *(x+5) - *(u+17) * *(x+4); } //Array *= with cps::Float(=double) input and arbitrary precision output template<typename FloatOut,typename FloatIn> void VecTimesEquFloat(FloatOut *out, FloatIn *in, const Float fac, const int len) { #pragma omp parallel for for(int i = 0; i < len; i++) out[i] = in[i] * fac; } inline void getNodeWork(const int work, int &node_work, int &node_off, bool &do_work, const bool node_local = false){ if(node_local){ node_work = work; node_off = 0; do_work = true; return; } //node does all the work int nodes = 1; for(int i=0;i<5;i++) nodes *= GJP.Nodes(i); int me = UniqueID(); //Stolen from BFM :) int basework = work/nodes; int backfill = nodes-(work % nodes); node_work = (work+me)/nodes; node_off = basework * me; if ( me > backfill ) node_off+= (me-backfill); if(node_work > 0) do_work = true; } inline void thread_work(int &my_work, int &my_offset, const int total_work, const int me, const int team){ my_work = total_work/team; my_offset = me * my_work; int rem = total_work - my_work * team; if(me < rem){ ++my_work; //first rem threads mop up the remaining work my_offset += me; //each thread before me has gained one extra unit of work }else my_offset += rem; //after the first rem threads, the offset shift is uniform } inline void compute_overlap(std::vector<bool> &out, const std::vector<bool> &a, const std::vector<bool> &b){ assert(a.size()==b.size()); out.resize(a.size()); for(int i=0;i<a.size();i++) out[i] = a[i] && b[i]; } class NullObject { public: NullObject(){} }; //A class inheriting from this type must have template parameter T as a double or float #define EXISTS_IF_DOUBLE_OR_FLOAT(T) public my_enable_if<is_double_or_float<mf_Float>::value,NullObject>::type //Functions for performing global and timeslice sums of single or double precision quantities. 
Daiqian had to implement these himself as CPS can only do this with the Float=double type // My global sum template <typename T> void QMP_sum_array(T *result, int len){ #ifdef USE_QMP if(sizeof(T) == sizeof(double)) { QMP_sum_double_array((double*)result, len); } else if(sizeof(T) == sizeof(float)) { QMP_sum_float_array((float*)result, len); } else { QMP_error("QMP_sum_array::data type not supported!\n"); } #else //CK: This only works for single-node code int nodes = 1; for(int i=0;i<4;i++) nodes *= cps::GJP.Nodes(i); if(nodes != 1){ cps::ERR.General("","QMP_sum_array(T *result, int len)","Only implemented for QMP on parallel machines"); } //do nothing! #endif } #ifndef USE_QMP inline void QMP_sum_double_array(double *result, int len){ //CK: This only works for single-node code int nodes = 1; for(int i=0;i<4;i++) nodes *= cps::GJP.Nodes(i); if(nodes != 1){ cps::ERR.General("","QMP_sum_double_array fake definition","Not implemented on parallel machines: use QMP!"); } } inline void QMP_sum_float_array(float *result, int len){ //CK: This only works for single-node code int nodes = 1; for(int i=0;i<4;i++) nodes *= cps::GJP.Nodes(i); if(nodes != 1){ cps::ERR.General("","QMP_sum_float_array fake definition","Not implemented on parallel machines: use QMP!"); } } #endif //Look for contiguous blocks of indices in the idx_map, output a list of start,size pairs inline void find_contiguous_blocks(std::vector<std::pair<int,int> > &blocks, const int idx_map[], int map_size){ blocks.resize(0); std::pair<int,int> block(0,1); //start, size int prev = idx_map[0]; for(int j_packed=1;j_packed<map_size;j_packed++){ int j_unpacked = idx_map[j_packed]; if(j_unpacked == prev+1){ ++block.second; }else{ blocks.push_back(block); block.first = j_packed; block.second = 1; } prev = j_unpacked; } blocks.push_back(block); int sum = 0; for(int b=0;b<blocks.size();b++){ //printf("Block %d, start %d, size %d\n",b,blocks[b].first,blocks[b].second); sum += blocks[b].second; } if(sum != map_size) 
ERR.General("find_contiguous_blocks","","Sum of block sizes %d, expect %d\n",sum,map_size); } template<typename T> inline void resize_2d(std::vector<std::vector<T> > &v, const size_t i, const size_t j){ v.resize(i); for(int a=0;a<i;a++) v[a].resize(j); } template<typename T> inline void resize_3d(std::vector<std::vector<std::vector<T> > > &v, const size_t i, const size_t j, const size_t k){ v.resize(i); for(int a=0;a<i;a++){ v[a].resize(j); for(int b=0;b<j;b++) v[a][b].resize(k); } } inline std::complex<double> GSLtrace(const SpinColorFlavorMatrix& a, const SpinColorFlavorMatrix& b){ const int scf_size = 24; std::complex<double> _a[scf_size][scf_size]; std::complex<double> _bT[scf_size][scf_size]; //In-place transpose of b so rows are contiguous for(int i=0;i<scf_size;i++){ int rem = i; int ci = rem % 3; rem /= 3; int si = rem % 4; rem /= 4; int fi = rem; for(int j=0;j<scf_size;j++){ rem = j; int cj = rem % 3; rem /= 3; int sj = rem % 4; rem /= 4; int fj = rem; _bT[i][j] = b(sj,cj,fj, si,ci,fi); _a[i][j] = a(si,ci,fi, sj,cj,fj); } } double* ad = (double*)&_a[0][0]; double* bd = (double*)&_bT[0][0]; gsl_block_complex_struct ablock; ablock.size = 24*24; ablock.data = ad; gsl_vector_complex arow; //single row of a arow.block = &ablock; arow.owner = 0; arow.size = 24; arow.stride = 1; gsl_block_complex_struct bblock; bblock.size = 24*24; bblock.data = bd; gsl_vector_complex bcol; //single col of b bcol.block = &bblock; bcol.owner = 0; bcol.size = 24; bcol.stride = 1; //gsl_blas_zdotu (const gsl_vector_complex * x, const gsl_vector_complex * y, gsl_complex * dotu) // // a[0][0]*b[0][0] + a[0][1]*b[1][0] + a[0][2]*b[2][0] + ... // //+ a[1][0]*b[0][1] + a[1][1]*b[1][1] + a[1][2]*b[2][1] + .... // //... 
std::complex<double> out(0.0); gsl_complex tmp; for(int i=0;i<24;i++){ arow.data = ad + 24*2*i; //i'th row offset bcol.data = bd + 24*2*i; //i'th col offset (remember we transposed it) gsl_blas_zdotu(&arow, &bcol, &tmp); reinterpret_cast<double(&)[2]>(out)[0] += GSL_REAL(tmp); reinterpret_cast<double(&)[2]>(out)[1] += GSL_IMAG(tmp); } return out; } //For a Nrows*Ncols matrix 'to' with elements in the standard order idx=(Ncols*i + j), poke a submatrix into it with origin (i0,j0) and size (ni,nj) template<typename T> void pokeSubmatrix(T* to, const T* sub, const int Nrows, const int Ncols, const int i0, const int j0, const int ni, const int nj, const bool threaded = false){ #define DOIT \ for(int row = i0; row < i0+ni; row++){ \ T* to_block = to + row*Ncols + j0; \ const T* from_block = sub + (row-i0)*nj; \ memcpy(to_block,from_block,nj*sizeof(T)); \ } if(threaded){ #pragma omp parallel for DOIT; }else{ DOIT; } #undef DOIT } //For a Nrows*Ncols matrix 'from' with elements in the standard order idx=(Ncols*i + j), get a submatrix with origin (i0,j0) and size (ni,nj) and store in sub template<typename T> void getSubmatrix(T* sub, const T* from, const int Nrows, const int Ncols, const int i0, const int j0, const int ni, const int nj, const bool threaded = false){ #define DOIT \ for(int row = i0; row < i0+ni; row++){ \ const T* from_block = from + row*Ncols + j0; \ T* to_block = sub + (row-i0)*nj; \ memcpy(to_block,from_block,nj*sizeof(T)); \ } if(threaded){ #pragma omp parallel for DOIT; }else{ DOIT; } #undef DOIT } //Simple test allocator to find out when memory is allocated template <typename T> class mmap_allocator: public std::allocator<T>{ public: typedef size_t size_type; typedef T* pointer; typedef const T* const_pointer; template<typename _Tp1> struct rebind{ typedef mmap_allocator<_Tp1> other; }; pointer allocate(size_type n, const void *hint=0){ fprintf(stderr, "Alloc %d bytes.\n", n*sizeof(T)); return std::allocator<T>::allocate(n, hint); } void 
deallocate(pointer p, size_type n){ fprintf(stderr, "Dealloc %d bytes (%p).\n", n*sizeof(T), p); return std::allocator<T>::deallocate(p, n); } mmap_allocator() throw(): std::allocator<T>() { fprintf(stderr, "Hello allocator!\n"); } mmap_allocator(const mmap_allocator &a) throw(): std::allocator<T>(a) { } template <class U> mmap_allocator(const mmap_allocator<U> &a) throw(): std::allocator<T>(a) { } ~mmap_allocator() throw() { } }; CPS_END_NAMESPACE #ifdef ARCH_BGQ #include <spi/include/kernel/memory.h> #else #include <sys/sysinfo.h> #endif CPS_START_NAMESPACE inline double byte_to_MB(const int b){ return double(b)/1024./1024.; } inline void printMem(){ #ifdef ARCH_BGQ #warning "printMem using ARCH_BGQ" uint64_t shared, persist, heapavail, stackavail, stack, heap, guard, mmap; Kernel_GetMemorySize(KERNEL_MEMSIZE_SHARED, &shared); Kernel_GetMemorySize(KERNEL_MEMSIZE_PERSIST, &persist); Kernel_GetMemorySize(KERNEL_MEMSIZE_HEAPAVAIL, &heapavail); Kernel_GetMemorySize(KERNEL_MEMSIZE_STACKAVAIL, &stackavail); Kernel_GetMemorySize(KERNEL_MEMSIZE_STACK, &stack); Kernel_GetMemorySize(KERNEL_MEMSIZE_HEAP, &heap); Kernel_GetMemorySize(KERNEL_MEMSIZE_GUARD, &guard); Kernel_GetMemorySize(KERNEL_MEMSIZE_MMAP, &mmap); if(!UniqueID()){ printf("printMem: Allocated heap: %.2f MB, avail. heap: %.2f MB\n", (double)heap/(1024*1024),(double)heapavail/(1024*1024)); printf("printMem: Allocated stack: %.2f MB, avail. 
stack: %.2f MB\n", (double)stack/(1024*1024), (double)stackavail/(1024*1024)); printf("printMem: Memory: shared: %.2f MB, persist: %.2f MB, guard: %.2f MB, mmap: %.2f MB\n", (double)shared/(1024*1024), (double)persist/(1024*1024), (double)guard/(1024*1024), (double)mmap/(1024*1024)); } #else #warning "printMem using NOARCH" /* unsigned long totalram; /\* Total usable main memory size *\/ */ /* unsigned long freeram; /\* Available memory size *\/ */ /* unsigned long sharedram; /\* Amount of shared memory *\/ */ /* unsigned long bufferram; /\* Memory used by buffers *\/ */ /* unsigned long totalswap; /\* Total swap space size *\/ */ /* unsigned long freeswap; /\* swap space still available *\/ */ /* unsigned short procs; /\* Number of current processes *\/ */ /* unsigned long totalhigh; /\* Total high memory size *\/ */ /* unsigned long freehigh; /\* Available high memory size *\/ */ /* unsigned int mem_unit; /\* Memory unit size in bytes *\/ */ struct sysinfo myinfo; sysinfo(&myinfo); double total_mem = myinfo.mem_unit * myinfo.totalram; total_mem /= (1024.*1024.); double free_mem = myinfo.mem_unit * myinfo.freeram; free_mem /= (1024.*1024.); if(!UniqueID()){ printf("printMem: Memory: total: %.2f MB, avail: %.2f MB, used %.2f MB\n",total_mem, free_mem, total_mem-free_mem); } //# define PRINT_MALLOC_INFO //Use of int means this is garbage for large memory systems # ifdef PRINT_MALLOC_INFO struct mallinfo mi; mi = mallinfo(); // int arena; /* Non-mmapped space allocated (bytes) */ // int ordblks; /* Number of free chunks */ // int smblks; /* Number of free fastbin blocks */ // int hblks; /* Number of mmapped regions */ // int hblkhd; /* Space allocated in mmapped regions (bytes) */ // int usmblks; /* Maximum total allocated space (bytes) */ // int fsmblks; /* Space in freed fastbin blocks (bytes) */ // int uordblks; /* Total allocated space (bytes) */ // int fordblks; /* Total free space (bytes) */ // int keepcost; /* Top-most, releasable space (bytes) */ 
if(!UniqueID()){ printf("printMem: Malloc info: arena %f MB, ordblks %d, smblks %d, hblks %d, hblkhd %f MB, fsmblks %f MB, uordblks %f MB, fordblks %f MB, keepcost %f MB\n", byte_to_MB(mi.arena), mi.ordblks, mi.smblks, mi.hblks, byte_to_MB(mi.hblkhd), byte_to_MB(mi.fsmblks), byte_to_MB(mi.uordblks), byte_to_MB(mi.fordblks), byte_to_MB(mi.keepcost) ); } # endif //# define PRINT_MALLOC_STATS Also doesn't work well # ifdef PRINT_MALLOC_STATS if(!UniqueID()) malloc_stats(); # endif #endif } //Skip gauge fixing and set all gauge fixing matrices to unity void gaugeFixUnity(Lattice &lat, const FixGaugeArg &fix_gauge_arg){ FixGaugeType fix = fix_gauge_arg.fix_gauge_kind; int start = fix_gauge_arg.hyperplane_start; int step = fix_gauge_arg.hyperplane_step; int num = fix_gauge_arg.hyperplane_num; int h_planes[num]; for(int i=0; i<num; i++) h_planes[i] = start + step * i; lat.FixGaugeAllocate(fix, num, h_planes); #pragma omp parallel for for(int sf=0;sf<(GJP.Gparity()+1)*GJP.VolNodeSites();sf++){ //s + vol*f int s = sf % GJP.VolNodeSites(); int f = sf / GJP.VolNodeSites(); const Matrix* mat = lat.FixGaugeMatrix(s,f); if(mat == NULL) continue; else{ Matrix* mm = const_cast<Matrix*>(mat); //evil, I know, but it saves duplicating the accessor (which is overly complicated) mm->UnitMatrix(); } } } //Set the complex number at pointer p to a random value of a chosen type //Uses the current LRG for the given FermionFieldDimension. User should choose the range and the particular site-RNG themselves beforehand template<typename mf_Float> class RandomComplex{}; //Only for float and double, hence I have to control its access template<typename mf_Float> class RandomComplexBase{ protected: template<typename T> friend class RandomComplex; static void rand(mf_Float *p, const RandomType type, const FermionFieldDimension frm_dim){ static const Float PI = 3.14159265358979323846; Float theta = LRG.Urand(frm_dim); switch(type) { case UONE: p[0] = cos(2. * PI * theta); p[1] = sin(2. 
* PI * theta); break; case ZTWO: p[0] = theta > 0.5 ? 1 : -1; p[1] = 0; break; case ZFOUR: if(theta > 0.75) { p[0] = 1; p[1] = 0; }else if(theta > 0.5) { p[0] = -1; p[1] = 0; }else if(theta > 0.25) { p[0] = 0; p[1] = 1; }else { p[0] = 0; p[1] = -1; } break; default: ERR.NotImplemented("RandomComplexBase", "rand(...)"); } } }; template<typename T> class RandomComplex<std::complex<T> > : public RandomComplexBase<T>{ public: static void rand(std::complex<T> *p, const RandomType &type, const FermionFieldDimension &frm_dim){ RandomComplexBase<T>::rand( (T*)p, type, frm_dim); } }; template<typename T, typename T_class> struct _mult_sgn_times_i_impl{}; template<typename T> struct _mult_sgn_times_i_impl<T,complex_double_or_float_mark>{ inline static T doit(const int sgn, const T &val){ return T( -sgn * val.imag(), sgn * val.real() ); // sign * i * val } }; #ifdef USE_GRID template<typename T> struct _mult_sgn_times_i_impl<T,grid_vector_complex_mark>{ inline static T doit(const int sgn, const T &val){ return sgn == -1 ? timesMinusI(val) : timesI(val); } }; #endif // template<typename T, typename my_enable_if<is_complex_double_or_float<T>::value,int>::type = 0> //for standard complex types // inline T multiplySignTimesI(const int sgn, const T &val){ // return T( -sgn * val.imag(), sgn * val.real() ); // sign * i * val // } // #ifdef USE_GRID // template<typename T, typename my_enable_if<is_grid_vector_complex<T>::value,int>::type = 0> //for Grid complex types // inline T multiplySignTimesI(const int sgn, const T &val){ // return sgn == -1 ? 
timesMinusI(val) : timesI(val); // } // #endif template<typename T> inline T multiplySignTimesI(const int sgn, const T &val){ return _mult_sgn_times_i_impl<T,typename ComplexClassify<T>::type>::doit(sgn,val); } template<typename T, typename ComplexClass> struct _cconj{}; template<typename T> struct _cconj<T,complex_double_or_float_mark>{ static inline T doit(const T &in){ return std::conj(in); } }; #ifdef USE_GRID template<typename T> struct _cconj<T,grid_vector_complex_mark>{ static inline T doit(const T &in){ return Grid::conjugate(in); } }; #endif template<typename T> inline T cconj(const T& in){ return _cconj<T,typename ComplexClassify<T>::type>::doit(in); } template<typename T> std::complex<double> convertComplexD(const std::complex<T> &what){ return what; } #ifdef USE_GRID std::complex<double> convertComplexD(const Grid::vComplexD &what){ return Reduce(what); } std::complex<double> convertComplexD(const Grid::vComplexF &what){ return Reduce(what); } #endif template<typename T> void globalSumComplex(std::complex<T>* v, const int n){ QMP_sum_array( (T*)v,2*n); } #ifdef USE_GRID template<typename T> struct _globalSumComplexGrid{ static inline void doit(T *v, const int n){ typedef typename T::scalar_type scalar_type; //an std::complex type typedef typename scalar_type::value_type floatType; int vmult = sizeof(T)/sizeof(scalar_type); floatType * ptr = (floatType *)v; QMP_sum_array(ptr,2*n*vmult); } }; void globalSumComplex(Grid::vComplexD* v, const int n){ _globalSumComplexGrid<Grid::vComplexD>::doit(v,n); } void globalSumComplex(Grid::vComplexF* v, const int n){ _globalSumComplexGrid<Grid::vComplexF>::doit(v,n); } #endif //The base G-parity momentum vector for quark fields with arbitrary sign inline void GparityBaseMomentum(int p[3], const int sgn){ for(int i=0;i<3;i++) if(GJP.Bc(i) == BND_CND_GPARITY) p[i] = sgn; else p[i] = 0; } #ifdef USE_MPI //get MPI rank of this node inline int getMyMPIrank(){ int my_mpi_rank; int ret = MPI_Comm_rank(MPI_COMM_WORLD, 
&my_mpi_rank); if(ret != MPI_SUCCESS) ERR.General("A2AmesonField","read","Comm_rank failed\n"); return my_mpi_rank; } //get the MPI rank of the node with UniqueID() == 0 inline int getHeadMPIrank(){ int head_mpi_rank; int rank_tmp = UniqueID() == 0 ? getMyMPIrank() : 0; int ret = MPI_Allreduce(&rank_tmp,&head_mpi_rank, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); //node is now the MPI rank corresponding to UniqueID == _node if(ret != MPI_SUCCESS) ERR.General("A2AmesonField","read","Reduce failed\n"); return head_mpi_rank; } inline int node_lex(const int* coor, const int ndir){ int out = 0; for(int i=ndir-1;i>=0;i--){ out *= GJP.Nodes(i); out += coor[i]; } return out; } //Generate map to convert lexicographic node index from GJP to an MPI rank in MPI_COMM_WORLD inline void getMPIrankMap(std::vector<int> &map){ int nodes = 1; int my_node_coor[5]; for(int i=0;i<5;i++){ nodes*= GJP.Nodes(i); my_node_coor[i] = GJP.NodeCoor(i); } const int my_node_lex = node_lex( my_node_coor, 5 ); const int my_mpi_rank = getMyMPIrank(); int *node_map_send = (int*)malloc(nodes*sizeof(int)); memset(node_map_send,0,nodes*sizeof(int)); node_map_send[my_node_lex] = my_mpi_rank; map.resize(nodes); int ret = MPI_Allreduce(node_map_send, &map[0], nodes, MPI_INT, MPI_SUM, MPI_COMM_WORLD); assert(ret == MPI_SUCCESS); free(node_map_send); } #endif //Invert 3x3 complex matrix. 
Expect elements accessible as row*3 + col //0 1 2 //3 4 5 //6 7 8 //+ - + //- + - //+ - + template<typename Zout, typename Zin> void z3x3_invert(Zout* out, Zin const* in){ out[0] = in[4]*in[8]-in[7]*in[5]; out[1] = -in[3]*in[8]+in[6]*in[5]; out[2] = in[3]*in[7]-in[6]*in[4]; out[3] = -in[1]*in[8]+in[7]*in[2]; out[4] = in[0]*in[8]-in[6]*in[2]; out[5] = -in[0]*in[7]+in[6]*in[1]; out[6] = in[1]*in[5]-in[4]*in[2]; out[7] = -in[0]*in[5]+in[3]*in[2]; out[8] = in[0]*in[4]-in[3]*in[1]; Zout det = in[0]*out[0] + in[1]*out[1] + in[2]*out[2]; out[0] /= det; out[1] /= det; out[2] /= det; out[3] /= det; out[4] /= det; out[5] /= det; out[6] /= det; out[7] /= det; out[8] /= det; } //A class that owns data via a pointer that has an assignment and copy constructor which does a deep copy. template<typename T> class PtrWrapper{ T* t; public: inline T& operator*(){ return *t; } inline T* operator->(){ return t; } inline T const& operator*() const{ return *t; } inline T const* operator->() const{ return t; } inline PtrWrapper(): t(NULL){}; inline PtrWrapper(T* _t): t(_t){} inline ~PtrWrapper(){ if(t!=NULL) delete t; } inline const bool assigned() const{ return t != NULL; } inline void set(T* _t){ if(t!=NULL) delete t; t = _t; } inline void free(){ if(t!=NULL) delete t; t = NULL; } //Deep copies inline PtrWrapper(const PtrWrapper &r): t(NULL){ if(r.t != NULL) t = new T(*r.t); } inline PtrWrapper & operator=(const PtrWrapper &r){ if(t!=NULL){ delete t; t = NULL; } if(r.t!=NULL) t = new T(*r.t); } }; CPS_END_NAMESPACE #endif
hoImageRegContainer2DRegistration.h
/** \file hoImageRegContainer2DRegistration.h \brief Define the class to perform image registration over a 2D image container \author Hui Xue */ #ifndef hoImageRegContainer2DRegistration_H_ #define hoImageRegContainer2DRegistration_H_ #pragma once #include <sstream> #include "hoNDArray.h" #include "hoNDImage.h" #include "hoMRImage.h" #include "hoNDInterpolator.h" #include "hoNDBoundaryHandler.h" #include "hoMatrix.h" #include "hoNDArray_utils.h" #include "hoNDArray_elemwise.h" #include "hoNDImage_util.h" // transformation #include "hoImageRegTransformation.h" #include "hoImageRegParametricTransformation.h" #include "hoImageRegDeformationField.h" // warper #include "hoImageRegWarper.h" // solver #include "hoImageRegDeformationFieldSolver.h" #include "hoImageRegParametricSolver.h" #include "hoImageRegDeformationFieldBidirectionalSolver.h" // dissimilarity #include "hoImageRegDissimilaritySSD.h" #include "hoImageRegDissimilarityLocalCCR.h" #include "hoImageRegDissimilarityMutualInformation.h" #include "hoImageRegDissimilarityNormalizedMutualInformation.h" // register #include "hoImageRegDeformationFieldRegister.h" #include "hoImageRegDeformationFieldBidirectionalRegister.h" // container2D #include "hoNDImageContainer2D.h" namespace Gadgetron { template <typename ObjType> void printInfo(const ObjType& obj) { std::ostringstream outs; obj.print(outs); outs << std::ends; std::string msg(outs.str()); GDEBUG_STREAM(msg.c_str()); } enum GT_IMAGE_REG_CONTAINER_MODE { GT_IMAGE_REG_CONTAINER_PAIR_WISE, GT_IMAGE_REG_CONTAINER_FIXED_REFERENCE, GT_IMAGE_REG_CONTAINER_PROGRESSIVE }; inline std::string getImageRegContainerModeName(GT_IMAGE_REG_CONTAINER_MODE v) { std::string name; switch (v) { case GT_IMAGE_REG_CONTAINER_PAIR_WISE: name = "Pair-wise"; break; case GT_IMAGE_REG_CONTAINER_FIXED_REFERENCE: name = "FixedReference"; break; case GT_IMAGE_REG_CONTAINER_PROGRESSIVE: name = "Progressive"; break; default: GERROR_STREAM("Unrecognized image registration container mode type : " 
<< v); } return name; } inline GT_IMAGE_REG_CONTAINER_MODE getImageRegContainerModeType(const std::string& name) { GT_IMAGE_REG_CONTAINER_MODE v; if ( name == "Pair-wise" ) { v = GT_IMAGE_REG_CONTAINER_PAIR_WISE; } else if ( name == "FixedReference" ) { v = GT_IMAGE_REG_CONTAINER_FIXED_REFERENCE; } else if ( name == "Progressive" ) { v = GT_IMAGE_REG_CONTAINER_PROGRESSIVE; } else { GERROR_STREAM("Unrecognized image registration container mode name : " << name); } return v; } /// perform the image registration over an image container2D template<typename TargetType, typename SourceType, typename CoordType> class hoImageRegContainer2DRegistration { public: typedef hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType> Self; typedef typename TargetType::value_type ValueType; enum { DIn = TargetType::NDIM }; enum { DOut = SourceType::NDIM }; typedef hoNDImage<ValueType, 2> Target2DType; typedef Target2DType Source2DType; typedef hoNDImage<ValueType, 3> Target3DType; typedef Target2DType Source3DType; typedef ValueType T; typedef ValueType element_type; typedef ValueType value_type; typedef CoordType coord_type; /// boundary handler and interpolator for target image typedef hoNDBoundaryHandler<TargetType> BoundaryHandlerTargetType; typedef hoNDBoundaryHandlerFixedValue<TargetType> BoundaryHandlerTargetFixedValueType; typedef hoNDBoundaryHandlerBorderValue<TargetType> BoundaryHandlerTargetBorderValueType; typedef hoNDBoundaryHandlerPeriodic<TargetType> BoundaryHandlerTargetPeriodicType; typedef hoNDBoundaryHandlerMirror<TargetType> BoundaryHandlerTargetMirrorType; typedef hoNDInterpolator<TargetType> InterpTargetType; typedef hoNDInterpolatorLinear<TargetType> InterpTargetLinearType; typedef hoNDInterpolatorNearestNeighbor<TargetType> InterpTargetNearestNeighborType; typedef hoNDInterpolatorBSpline<TargetType, DIn> InterpTargetBSplineType; /// boundary handler and interpolator for source image typedef hoNDBoundaryHandler<SourceType> 
BoundaryHandlerSourceType;
        typedef hoNDBoundaryHandlerFixedValue<SourceType> BoundaryHandlerSourceFixedValueType;
        typedef hoNDBoundaryHandlerBorderValue<SourceType> BoundaryHandlerSourceBorderValueType;
        typedef hoNDBoundaryHandlerPeriodic<SourceType> BoundaryHandlerSourcePeriodicType;
        typedef hoNDBoundaryHandlerMirror<SourceType> BoundaryHandlerSourceMirrorType;

        typedef hoNDInterpolator<SourceType> InterpSourceType;
        typedef hoNDInterpolatorLinear<SourceType> InterpSourceLinearType;
        typedef hoNDInterpolatorNearestNeighbor<SourceType> InterpSourceNearestNeighborType;
        typedef hoNDInterpolatorBSpline<SourceType, DIn> InterpSourceBSplineType;

        /// warper type
        typedef hoImageRegWarper<TargetType, SourceType, CoordType> WarperType;

        /// image dissimilarity type
        typedef hoImageRegDissimilarity<SourceType> DissimilarityType;

        /// transformation
        typedef hoImageRegParametricTransformation<CoordType, DIn, DOut> TransformationParametricType;
        typedef hoImageRegDeformationField<CoordType, DIn> TransformationDeformationFieldType;
        typedef typename TransformationDeformationFieldType::input_point_type input_point_type;
        typedef typename TransformationDeformationFieldType::output_point_type output_point_type;
        typedef typename TransformationDeformationFieldType::jacobian_position_type jacobian_position_type;
        typedef typename TransformationDeformationFieldType::DeformationFieldType DeformationFieldType;

        /// container
        typedef hoNDImageContainer2D<TargetType> TargetContinerType;
        typedef hoNDImageContainer2D<SourceType> SourceContinerType;
        typedef hoNDImageContainer2D<DeformationFieldType> DeformationFieldContinerType;

        hoImageRegContainer2DRegistration(unsigned int resolution_pyramid_levels=3, bool use_world_coordinates=false, ValueType bg_value=ValueType(0));
        virtual ~hoImageRegContainer2DRegistration();

        /// set the default parameters
        virtual bool setDefaultParameters(unsigned int resolution_pyramid_levels=3, bool use_world_coordinates=false);

        /// register two images
        /// transform or deform can contain the initial transformation or deformation
        /// if warped == NULL, warped images will not be computed
        virtual bool registerTwoImagesParametric(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, TransformationParametricType& transform);
        virtual bool registerTwoImagesDeformationField(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, DeformationFieldType** deform);
        virtual bool registerTwoImagesDeformationFieldBidirectional(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, DeformationFieldType** deform, DeformationFieldType** deformInv);

        /// if warped is true, the warped images will be computed; if initial is true, the registration will be initialized by deformation_field_ and deformation_field_inverse_
        virtual bool registerOverContainer2DPairWise(TargetContinerType& targetContainer, SourceContinerType& sourceContainer, bool warped, bool initial = false);
        virtual bool registerOverContainer2DFixedReference(TargetContinerType& targetContainer, const std::vector<unsigned int>& referenceFrame, bool warped, bool initial = false);
        virtual bool registerOverContainer2DProgressive(TargetContinerType& targetContainer, const std::vector<unsigned int>& referenceFrame);

        /// warp image containers
        /// Applies the per-dimension deformation fields (deformation_field[0..DIn-1], one container per
        /// dimension) to every image of sourceContainer; results go into warppedContainer.
        /// bh selects the boundary condition for the 5th-order BSpline interpolator.
        template <typename TargetType2, typename SourceType2>
        bool warpContainer2D(const hoNDImageContainer2D< TargetType2 >& targetContainer, const hoNDImageContainer2D< SourceType2 >& sourceContainer, DeformationFieldContinerType deformation_field[], hoNDImageContainer2D< SourceType2 >& warppedContainer, Gadgetron::GT_BOUNDARY_CONDITION bh=GT_BOUNDARY_CONDITION_FIXEDVALUE)
        {
            try
            {
                typedef typename TargetType2::value_type ValueType2;
                typedef TargetType2 ImageTargetType;
                typedef SourceType2 ImageSourceType;

                size_t R = sourceContainer.rows();
                std::vector<size_t> cols = sourceContainer.cols();

                GADGET_CHECK_RETURN_FALSE(targetContainer.dimensions_equal_container(sourceContainer));
                GADGET_CHECK_RETURN_FALSE(targetContainer.dimensions_equal_container(deformation_field[0]));

                // (re)allocate the output container only when its geometry does not match
                if ( !targetContainer.dimensions_equal_container(warppedContainer) )
                {
                    GADGET_CHECK_RETURN_FALSE(warppedContainer.copyFrom(targetContainer));
                }

                if ( R == 1 )
                {
                    // single row: parallelize over columns (only when DIn==2, per the OpenMP if-clause)
                    long long N = (long long)cols[0];

                    long long c;

                    #pragma omp parallel private(c) shared(N, targetContainer, sourceContainer, warppedContainer, deformation_field, bh) if ( DIn==2 )
                    {
                        // per-thread transformation, boundary handlers, interpolator and warper
                        hoImageRegDeformationField<CoordType, DIn> deformTransform;

                        hoNDBoundaryHandlerFixedValue< ImageSourceType > bhFixedValue;
                        hoNDBoundaryHandlerBorderValue< ImageSourceType > bhBorderValue;
                        hoNDBoundaryHandlerPeriodic< ImageSourceType > bhPeriodic;
                        hoNDBoundaryHandlerMirror< ImageSourceType > bhMirror;

                        hoNDInterpolatorBSpline<ImageSourceType, DIn> interpBSpline(5);

                        hoImageRegWarper<ImageTargetType, ImageSourceType, CoordType> warper;
                        warper.setBackgroundValue(bg_value_);
                        warper.setTransformation(deformTransform);
                        warper.setInterpolator(interpBSpline);

                        #pragma omp for
                        for ( c=0; c<N; c++ )
                        {
                            const ImageTargetType& target = targetContainer(0, c);
                            ImageSourceType& source = const_cast<ImageSourceType&>(sourceContainer(0, c));
                            ImageTargetType& warpped = warppedContainer(0, c);

                            // NOTE(review): only bhFixedValue receives the source array here; the
                            // border/periodic/mirror handlers are selected below without setArray — confirm intended
                            bhFixedValue.setArray( source );
                            interpBSpline.setArray( source );

                            if ( bh == GT_BOUNDARY_CONDITION_FIXEDVALUE )
                                interpBSpline.setBoundaryHandler(bhFixedValue);
                            else if ( bh == GT_BOUNDARY_CONDITION_BORDERVALUE )
                                interpBSpline.setBoundaryHandler(bhBorderValue);
                            else if ( bh == GT_BOUNDARY_CONDITION_PERIODIC )
                                interpBSpline.setBoundaryHandler(bhPeriodic);
                            else if ( bh == GT_BOUNDARY_CONDITION_MIRROR )
                                interpBSpline.setBoundaryHandler(bhMirror);
                            else
                                interpBSpline.setBoundaryHandler(bhFixedValue);

                            // one deformation-field component per input dimension
                            for ( unsigned int ii=0; ii<DIn; ii++ )
                            {
                                deformTransform.setDeformationField( deformation_field[ii](0, c), ii );
                            }

                            warper.warp(target, source, use_world_coordinates_, warpped);
                        }
                    }
                }
                else
                {
                    // multiple rows: parallelize over rows
                    long long r, c;
                    #pragma omp parallel default(none) private(r, c) shared(targetContainer, sourceContainer, warppedContainer, deformation_field, R, cols, bh) if ( DIn==2 )
                    {
                        hoImageRegDeformationField<CoordType, DIn> deformTransform;

                        hoNDBoundaryHandlerFixedValue< ImageSourceType > bhFixedValue;
                        hoNDBoundaryHandlerBorderValue< ImageSourceType > bhBorderValue;
                        hoNDBoundaryHandlerPeriodic< ImageSourceType > bhPeriodic;
                        hoNDBoundaryHandlerMirror< ImageSourceType > bhMirror;

                        hoNDInterpolatorBSpline<ImageSourceType, DIn> interpBSpline(5);

                        hoImageRegWarper<ImageTargetType, ImageSourceType, CoordType> warper;
                        warper.setBackgroundValue(bg_value_);
                        warper.setTransformation(deformTransform);
                        warper.setInterpolator(interpBSpline);

                        #pragma omp for
                        for ( r=0; r<(long long)R; r++ )
                        {
                            long long N = (long long)cols[r];

                            for ( c=0; c<N; c++ )
                            {
                                const ImageTargetType& target = targetContainer(r, c);
                                ImageSourceType& source = const_cast<ImageSourceType&>(sourceContainer(r, c));
                                ImageTargetType& warpped = warppedContainer(r, c);

                                // NOTE(review): same asymmetry as the R==1 branch — only bhFixedValue gets setArray
                                bhFixedValue.setArray( source );
                                interpBSpline.setArray( source );

                                if ( bh == GT_BOUNDARY_CONDITION_FIXEDVALUE )
                                    interpBSpline.setBoundaryHandler(bhFixedValue);
                                else if ( bh == GT_BOUNDARY_CONDITION_BORDERVALUE )
                                    interpBSpline.setBoundaryHandler(bhBorderValue);
                                else if ( bh == GT_BOUNDARY_CONDITION_PERIODIC )
                                    interpBSpline.setBoundaryHandler(bhPeriodic);
                                else if ( bh == GT_BOUNDARY_CONDITION_MIRROR )
                                    interpBSpline.setBoundaryHandler(bhMirror);
                                else
                                    interpBSpline.setBoundaryHandler(bhFixedValue);

                                for ( unsigned int ii=0; ii<DIn; ii++ )
                                {
                                    deformTransform.setDeformationField( deformation_field[ii](r, c), ii );
                                }

                                warper.warp(target, source, use_world_coordinates_, warpped);
                            }
                        }
                    }
                }
            }
            catch(...)
            {
                GERROR_STREAM("Errors happened in hoImageRegContainer2DRegistration<...>::warpContainer2D(...) ... ");
                return false;
            }

            return true;
        }

        /// print the class information
        virtual void print(std::ostream& os) const;

        // ----------------------------------
        // parameters
        // ----------------------------------

        /// mode for registration over the container
        GT_IMAGE_REG_CONTAINER_MODE container_reg_mode_;

        /// mode for transformation
        GT_IMAGE_REG_TRANSFORMATION container_reg_transformation_;

        /// back ground values, used to mark regions in the target image which will not be warped
        ValueType bg_value_;

        /// whether to perform world coordinate registration
        bool use_world_coordinates_;

        /// number of resolution pyramid levels
        unsigned int resolution_pyramid_levels_;

        /// number of iterations for every pyramid level
        std::vector<unsigned int> max_iter_num_pyramid_level_;

        /// dissimilarity
        GT_IMAGE_DISSIMILARITY dissimilarity_type_;

        /// threshold for dissimilarity for every pyramid level
        std::vector<ValueType> dissimilarity_thres_pyramid_level_;

        /// number of search size division for every pyramid level
        std::vector<unsigned int> div_num_pyramid_level_;

        /// parameters for dissimilarity measures, for every paramid level
        /// LocalCCR
        std::vector<std::vector<ValueType> > dissimilarity_LocalCCR_sigmaArg_;

        /// Histogram based
        /// Mutual information
        std::vector<ValueType> dissimilarity_MI_betaArg_;

        /// regularization strength for every pyramid level
        /// if regularization_hilbert_strength_world_coordinate_=true, this strength is in the unit of world coordinate
        /// if regularization_hilbert_strength_world_coordinate_=false, this strength is in the unit of pixel
        bool regularization_hilbert_strength_world_coordinate_;
        std::vector< std::vector<ValueType> > regularization_hilbert_strength_pyramid_level_;

        /// boundary handler type
        std::vector<GT_BOUNDARY_CONDITION> boundary_handler_type_warper_;
        std::vector<GT_IMAGE_INTERPOLATOR> interp_type_warper_;

        /// number of iterations to improve the estimation of the inverse transform
        std::vector<unsigned int> inverse_deform_enforce_iter_pyramid_level_;

        /// weight to update the estimation of the inverse transform, must be within [0 1]
        std::vector<CoordType> inverse_deform_enforce_weight_pyramid_level_;

        /// in-FOV constraint
        bool apply_in_FOV_constraint_;

        /// divergence free constraint
        bool apply_divergence_free_constraint_;

        /// verbose mode
        bool verbose_;

        // ----------------------------------
        // debug and timing
        // ----------------------------------

        // clock for timing
        Gadgetron::GadgetronTimer gt_timer1_;
        Gadgetron::GadgetronTimer gt_timer2_;
        Gadgetron::GadgetronTimer gt_timer3_;

        bool performTiming_;

        // exporter
        Gadgetron::ImageIOAnalyze gt_exporter_;

        // debug folder
        std::string debugFolder_;

        // ----------------------------------
        // registration results
        // ----------------------------------

        /// warpped images
        TargetContinerType warped_container_;

        /// for parametric registration
        std::vector< std::vector<TransformationParametricType*> > parametric_tranformation_;

        /// deformation field registration
        DeformationFieldContinerType deformation_field_[DIn];
        DeformationFieldContinerType deformation_field_inverse_[DIn];

    protected:

        bool initialize(const TargetContinerType& targetContainer, bool warped);
    };

    /// Constructor: disables timer reporting and installs the default parameter set.
    template<typename TargetType, typename SourceType, typename CoordType>
    hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    hoImageRegContainer2DRegistration(unsigned int resolution_pyramid_levels, bool use_world_coordinates, ValueType bg_value)
        : bg_value_(bg_value), use_world_coordinates_(use_world_coordinates), resolution_pyramid_levels_(resolution_pyramid_levels), performTiming_(false)
    {
        gt_timer1_.set_timing_in_destruction(false);
        gt_timer2_.set_timing_in_destruction(false);
        gt_timer3_.set_timing_in_destruction(false);

        GADGET_CHECK_THROW(this->setDefaultParameters(resolution_pyramid_levels, use_world_coordinates));
    }

    /// Destructor: releases the parametric transformations owned by this object.
    template<typename TargetType, typename SourceType, typename CoordType>
    hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    ~hoImageRegContainer2DRegistration()
    {
        if ( !parametric_tranformation_.empty() )
        {
            size_t r, c;

            for ( r=0; r<parametric_tranformation_.size(); r++ )
            {
                if ( !parametric_tranformation_[r].empty() )
                {
                    for ( c=0; c<parametric_tranformation_[r].size(); c++ )
                    {
                        if ( parametric_tranformation_[r][c] != NULL )
                        {
                            delete parametric_tranformation_[r][c];
                            parametric_tranformation_[r][c] = NULL;
                        }
                    }
                }
            }
        }
    }

    /// Reset every registration parameter to its default for the given pyramid depth.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::setDefaultParameters(unsigned int resolution_pyramid_levels, bool use_world_coordinates)
    {
        unsigned int ii;

        use_world_coordinates_ = use_world_coordinates;
        resolution_pyramid_levels_ = resolution_pyramid_levels;

        container_reg_mode_ = GT_IMAGE_REG_CONTAINER_PAIR_WISE;
        container_reg_transformation_ = GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD;

        max_iter_num_pyramid_level_.clear();
        max_iter_num_pyramid_level_.resize(resolution_pyramid_levels_, 32);
        // fewer iterations at the highest (finest) level
        max_iter_num_pyramid_level_[0] = 16;

        dissimilarity_type_ = GT_IMAGE_DISSIMILARITY_LocalCCR;

        dissimilarity_thres_pyramid_level_.clear();
        dissimilarity_thres_pyramid_level_.resize(resolution_pyramid_levels_, (ValueType)(1e-5) );

        div_num_pyramid_level_.clear();
        div_num_pyramid_level_.resize(resolution_pyramid_levels_, 2);

        dissimilarity_LocalCCR_sigmaArg_.clear();
        dissimilarity_LocalCCR_sigmaArg_.resize(resolution_pyramid_levels_);
        for ( ii=0; ii<resolution_pyramid_levels_; ii++ )
        {
            dissimilarity_LocalCCR_sigmaArg_[ii].resize(DIn, 2.0);
        }

        dissimilarity_MI_betaArg_.clear();
        dissimilarity_MI_betaArg_.resize(resolution_pyramid_levels_, 2);

        regularization_hilbert_strength_world_coordinate_ = false;

        regularization_hilbert_strength_pyramid_level_.resize(resolution_pyramid_levels_);
        for ( ii=0; ii<resolution_pyramid_levels_; ii++ )
        {
            regularization_hilbert_strength_pyramid_level_[ii].resize(DIn, 12.0);
        }

        boundary_handler_type_warper_.clear();
        boundary_handler_type_warper_.resize(resolution_pyramid_levels_, GT_BOUNDARY_CONDITION_BORDERVALUE);
interp_type_warper_.clear(); interp_type_warper_.resize(resolution_pyramid_levels_, GT_IMAGE_INTERPOLATOR_LINEAR); inverse_deform_enforce_iter_pyramid_level_.clear(); inverse_deform_enforce_iter_pyramid_level_.resize(resolution_pyramid_levels_, 10); inverse_deform_enforce_weight_pyramid_level_.clear(); inverse_deform_enforce_weight_pyramid_level_.resize(resolution_pyramid_levels_, 0.5); apply_in_FOV_constraint_ = false; apply_divergence_free_constraint_ = false; verbose_ = false; return true; } template<typename TargetType, typename SourceType, typename CoordType> bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>:: registerTwoImagesParametric(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, TransformationParametricType& transform) { try { } catch(...) { GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerTwoImagesParametric(...) ... "); return false; } return true; } template<typename TargetType, typename SourceType, typename CoordType> bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>:: registerTwoImagesDeformationField(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, DeformationFieldType** deform) { try { GADGET_CHECK_RETURN_FALSE(deform!=NULL); hoImageRegDeformationFieldRegister<TargetType, CoordType> reg(resolution_pyramid_levels_, use_world_coordinates_, bg_value_); if ( !debugFolder_.empty() ) { reg.debugFolder_ = debugFolder_; } GADGET_CHECK_RETURN_FALSE(reg.setDefaultParameters(resolution_pyramid_levels_, use_world_coordinates_)); reg.max_iter_num_pyramid_level_ = max_iter_num_pyramid_level_; reg.div_num_pyramid_level_ = div_num_pyramid_level_; reg.dissimilarity_MI_betaArg_ = dissimilarity_MI_betaArg_; reg.regularization_hilbert_strength_world_coordinate_ = regularization_hilbert_strength_world_coordinate_; reg.regularization_hilbert_strength_pyramid_level_ = 
regularization_hilbert_strength_pyramid_level_; reg.dissimilarity_LocalCCR_sigmaArg_ = dissimilarity_LocalCCR_sigmaArg_; reg.boundary_handler_type_warper_ = boundary_handler_type_warper_; reg.interp_type_warper_ = interp_type_warper_; reg.apply_in_FOV_constraint_ = apply_in_FOV_constraint_; reg.apply_divergence_free_constraint_ = apply_divergence_free_constraint_; reg.verbose_ = verbose_; reg.dissimilarity_type_.clear(); reg.dissimilarity_type_.resize(resolution_pyramid_levels_, dissimilarity_type_); reg.setTarget( const_cast<TargetType&>(target) ); reg.setSource( const_cast<TargetType&>(source) ); if ( verbose_ ) { std::ostringstream outs; reg.print(outs); GDEBUG_STREAM(outs.str()); } GADGET_CHECK_RETURN_FALSE(reg.initialize()); unsigned int d; if ( target.dimensions_equal( *(deform[0]) ) ) { if ( initial ) { for ( d=0; d<DIn; d++ ) { reg.transform_->setDeformationField( *(deform[d]), d); } } } else { for ( d=0; d<DIn; d++ ) { deform[d]->copyImageInfo(target); Gadgetron::clear( *(deform[d]) ); } } GADGET_CHECK_RETURN_FALSE(reg.performRegistration()); for ( d=0; d<DIn; d++ ) { *(deform[d]) = reg.transform_->getDeformationField(d); } if ( warped != NULL ) { /// bspline warp hoNDBoundaryHandlerFixedValue<SourceType> bhFixedValue; bhFixedValue.setArray( const_cast<SourceType&>(source) ); hoNDInterpolatorBSpline<SourceType, DIn> interpBSpline(5); interpBSpline.setArray( const_cast<SourceType&>(source) ); interpBSpline.setBoundaryHandler(bhFixedValue); hoImageRegWarper<TargetType, SourceType, CoordType> warper; warper.setBackgroundValue(bg_value_); warper.setTransformation(*reg.transform_); warper.setInterpolator(interpBSpline); warper.warp(target, source, use_world_coordinates_, *warped); } } catch(...) { GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerTwoImagesDeformationField(...) ... 
"); return false; } return true; } template<typename TargetType, typename SourceType, typename CoordType> bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>:: registerTwoImagesDeformationFieldBidirectional(const TargetType& target, const SourceType& source, bool initial, TargetType* warped, DeformationFieldType** deform, DeformationFieldType** deformInv) { try { GADGET_CHECK_RETURN_FALSE(deform!=NULL); GADGET_CHECK_RETURN_FALSE(deformInv!=NULL); hoImageRegDeformationFieldBidirectionalRegister<TargetType, CoordType> reg(resolution_pyramid_levels_, use_world_coordinates_, bg_value_); if ( !debugFolder_.empty() ) { reg.debugFolder_ = debugFolder_; } GADGET_CHECK_RETURN_FALSE(reg.setDefaultParameters(resolution_pyramid_levels_, use_world_coordinates_)); reg.max_iter_num_pyramid_level_ = max_iter_num_pyramid_level_; reg.div_num_pyramid_level_ = div_num_pyramid_level_; reg.dissimilarity_MI_betaArg_ = dissimilarity_MI_betaArg_; reg.regularization_hilbert_strength_world_coordinate_ = regularization_hilbert_strength_world_coordinate_; reg.regularization_hilbert_strength_pyramid_level_ = regularization_hilbert_strength_pyramid_level_; reg.dissimilarity_LocalCCR_sigmaArg_ = dissimilarity_LocalCCR_sigmaArg_; reg.boundary_handler_type_warper_ = boundary_handler_type_warper_; reg.interp_type_warper_ = interp_type_warper_; reg.inverse_deform_enforce_iter_pyramid_level_ = inverse_deform_enforce_iter_pyramid_level_; reg.inverse_deform_enforce_weight_pyramid_level_ = inverse_deform_enforce_weight_pyramid_level_; reg.apply_in_FOV_constraint_ = apply_in_FOV_constraint_; reg.apply_divergence_free_constraint_ = apply_divergence_free_constraint_; reg.verbose_ = verbose_; reg.dissimilarity_type_.clear(); reg.dissimilarity_type_.resize(resolution_pyramid_levels_, dissimilarity_type_); reg.setTarget( const_cast<TargetType&>(target) ); reg.setSource( const_cast<SourceType&>(source) ); if ( verbose_ ) { Gadgetron::printInfo(reg); } 
GADGET_CHECK_RETURN_FALSE(reg.initialize());

            unsigned int d;

            // reuse the provided fields as the initial estimate when the geometry matches
            if ( target.dimensions_equal( *(deform[0]) ) )
            {
                if ( initial )
                {
                    for ( d=0; d<DIn; d++ )
                    {
                        reg.transform_->setDeformationField( *(deform[d]), d);
                        reg.transform_inverse_->setDeformationField( *(deformInv[d]), d);
                    }
                }
            }
            else
            {
                // geometry mismatch: (re)allocate forward and inverse fields on the target grid
                for ( d=0; d<DIn; d++ )
                {
                    deform[d]->copyImageInfo(target);
                    Gadgetron::clear( *(deform[d]) );

                    deformInv[d]->copyImageInfo(target);
                    Gadgetron::clear( *(deformInv[d]) );
                }
            }

            GADGET_CHECK_RETURN_FALSE(reg.performRegistration());

            for ( d=0; d<DIn; d++ )
            {
                *(deform[d]) = reg.transform_->getDeformationField(d);
                *(deformInv[d]) = reg.transform_inverse_->getDeformationField(d);
            }

            if ( warped != NULL )
            {
                /// bspline warp
                // hoNDBoundaryHandlerFixedValue<SourceType> bhFixedValue;
                // NOTE(review): despite its name, this handler is a BorderValue handler — confirm intended
                hoNDBoundaryHandlerBorderValue<SourceType> bhFixedValue;
                bhFixedValue.setArray(const_cast<SourceType&>(source));

                hoNDInterpolatorBSpline<SourceType, DIn> interpBSpline(5);
                interpBSpline.setArray(const_cast<SourceType&>(source));
                interpBSpline.setBoundaryHandler(bhFixedValue);

                hoImageRegWarper<TargetType, SourceType, CoordType> warper;
                warper.setBackgroundValue(bg_value_);
                warper.setTransformation(*reg.transform_);
                warper.setInterpolator(interpBSpline);

                GADGET_CHECK_RETURN_FALSE(warper.warp(target, source, use_world_coordinates_, *warped));
            }
        }
        catch(...)
        {
            GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerTwoImagesDeformationFieldBidirectional(...) ... ");
            return false;
        }

        return true;
    }

    /// Allocate/reset the result containers (warped images and deformation fields) to match targetContainer.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    initialize(const TargetContinerType& targetContainer, bool warped)
    {
        try
        {
            if ( warped )
            {
                GADGET_CHECK_RETURN_FALSE(warped_container_.copyFrom(targetContainer));
            }

            std::vector<size_t> col = targetContainer.cols();

            unsigned int ii;

            if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD )
            {
                for ( ii=0; ii<DIn; ii++ )
                {
                    GADGET_CHECK_RETURN_FALSE(deformation_field_[ii].create(col));
                    GADGET_CHECK_RETURN_FALSE(deformation_field_[ii].fillWithZeros());
                }
            }
            else if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL )
            {
                for ( ii=0; ii<DIn; ii++ )
                {
                    GADGET_CHECK_RETURN_FALSE(deformation_field_[ii].create(col));
                    GADGET_CHECK_RETURN_FALSE(deformation_field_[ii].fillWithZeros());

                    GADGET_CHECK_RETURN_FALSE(deformation_field_inverse_[ii].create(col));
                    GADGET_CHECK_RETURN_FALSE(deformation_field_inverse_[ii].fillWithZeros());
                }
            }
            else if ( container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_RIGID || container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_AFFINE )
            {
                GDEBUG_STREAM("To be implemented ...");
            }
        }
        catch(...)
        {
            GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::initialize(const TargetContinerType& targetContainer) ... ");
            return false;
        }

        return true;
    }

    /// Register target(r,c)/source(r,c) pairs element-by-element across the two containers.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    registerOverContainer2DPairWise(TargetContinerType& targetContainer, SourceContinerType& sourceContainer, bool warped, bool initial)
    {
        try
        {
            GADGET_CHECK_RETURN_FALSE(this->initialize(targetContainer, warped));

            std::vector<TargetType*> targetImages;
            targetContainer.get_all_images(targetImages);

            std::vector<SourceType*> sourceImages;
            sourceContainer.get_all_images(sourceImages);

            long long numOfImages = targetImages.size();
            GADGET_CHECK_RETURN_FALSE(numOfImages==sourceImages.size());

            std::vector<SourceType*> warpedImages(numOfImages, NULL);
            if ( warped )
            {
                warped_container_.get_all_images(warpedImages);
            }

            GDEBUG_STREAM("registerOverContainer2DPairWise - threading ... ");

            int numOfThreads = 1;

#ifdef USE_OMP
            int numOfProcs = omp_get_num_procs();
            int nested = omp_get_nested();
            GDEBUG_STREAM("registerOverContainer2DPairWise - nested openMP is " << nested);

            /*if (numOfImages < numOfProcs - 1)
            {
                omp_set_nested(1);
                GDEBUG_STREAM("registerOverContainer2DPairWise - nested openMP on ... ");
            }
            else
            {
                omp_set_nested(0);
                GDEBUG_STREAM("registerOverContainer2DPairWise - nested openMP off ... ");
            }*/

            // one thread per image, capped at the processor count
            numOfThreads = (numOfImages>numOfProcs) ? numOfProcs : numOfImages;
#endif // USE_OMP

            unsigned int ii;
            long long n;

            if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD )
            {
                std::vector< std::vector<DeformationFieldType*> > deform(DIn);
                for ( ii=0; ii<DIn; ii++ )
                {
                    deformation_field_[ii].get_all_images(deform[ii]);
                }

#pragma omp parallel default(none) private(n, ii) shared(numOfImages, initial, targetImages, sourceImages, deform, warpedImages) num_threads(numOfThreads)
                {
                    DeformationFieldType* deformCurr[DIn];

#pragma omp for
                    for ( n=0; n<numOfImages; n++ )
                    {
                        TargetType& target = *(targetImages[n]);
                        SourceType& source = *(sourceImages[n]);

                        // an image registered to itself: zero deformation, skip the solver
                        if ( &target == &source )
                        {
                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deform[ii][n]->create(target.get_dimensions());
                                Gadgetron::clear( *deform[ii][n] );
                            }
                        }
                        else
                        {
                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deformCurr[ii] = deform[ii][n];
                            }

                            registerTwoImagesDeformationField(target, source, initial, warpedImages[n], deformCurr);
                        }
                    }
                }
            }
            else if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL )
            {
                std::vector< std::vector<DeformationFieldType*> > deform(DIn);
                std::vector< std::vector<DeformationFieldType*> > deformInv(DIn);
                for ( ii=0; ii<DIn; ii++ )
                {
                    deformation_field_[ii].get_all_images(deform[ii]);
                    deformation_field_inverse_[ii].get_all_images(deformInv[ii]);
                }

#pragma omp parallel default(none) private(n, ii) shared(numOfImages, initial, targetImages, sourceImages, deform, deformInv, warpedImages) num_threads(numOfThreads)
                {
                    DeformationFieldType* deformCurr[DIn];
                    DeformationFieldType* deformInvCurr[DIn];

#pragma omp for
                    for ( n=0; n<numOfImages; n++ )
                    {
                        TargetType& target = *(targetImages[n]);
                        SourceType& source = *(sourceImages[n]);

                        if ( &target == &source )
                        {
                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deform[ii][n]->create(target.get_dimensions());
                                Gadgetron::clear( *deform[ii][n] );

                                deformInv[ii][n]->create(source.get_dimensions());
                                Gadgetron::clear( *deformInv[ii][n] );
                            }
                        }
                        else
                        {
                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deformCurr[ii] = deform[ii][n];
                                deformInvCurr[ii] = deformInv[ii][n];
                            }

                            registerTwoImagesDeformationFieldBidirectional(target, source, initial, warpedImages[n], deformCurr, deformInvCurr);
                        }
                    }
                }
            }
            else if ( container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_RIGID || container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_AFFINE )
            {
                GDEBUG_STREAM("To be implemented ...");
            }

            //#ifdef USE_OMP
            //    omp_set_nested(nested);
            //#endif // USE_OMP
        }
        catch(...)
        {
            GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerOverContainer2DPairWise(...) ... ");
            return false;
        }

        return true;
    }

    /// Register every image of a row to that row's fixed reference frame.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    registerOverContainer2DFixedReference(TargetContinerType& imageContainer, const std::vector<unsigned int>& referenceFrame, bool warped, bool initial)
    {
        try
        {
            GADGET_CHECK_RETURN_FALSE(this->initialize(imageContainer, warped));

            size_t row = imageContainer.rows();
            std::vector<size_t> col = imageContainer.cols();

            GADGET_CHECK_RETURN_FALSE(referenceFrame.size() == col.size());

            std::vector<SourceType*> sourceImages;
            imageContainer.get_all_images(sourceImages);

            long long numOfImages = (long long)sourceImages.size();

            // warped images
            std::vector<SourceType*> warpedImages(numOfImages, NULL);
            if ( warped )
            {
                warped_container_.get_all_images(warpedImages);
            }

            unsigned int ii;
            long long n;
            size_t r, c;

            // fill in the reference frames: every column of row r targets imageContainer(r, referenceFrame[r])
            std::vector<TargetType*> targetImages(numOfImages, NULL);

            size_t ind=0;
            for ( r=0; r<row; r++ )
            {
                TargetType& ref = const_cast<TargetType&>(imageContainer(r, referenceFrame[r]));

                for ( c=0; c<col[r]; c++ )
                {
                    targetImages[ind] = &ref;
                    ind++;
                }
            }

            GADGET_CHECK_RETURN_FALSE(numOfImages==targetImages.size());

            int numOfThreads = 1;

#ifdef USE_OMP
            int numOfProcs = omp_get_num_procs();
            int nested = omp_get_nested();
            GDEBUG_STREAM("registerOverContainer2DFixedReference - nested openMP is " <<
nested);

            //if (numOfImages < numOfProcs - 1)
            //{
            //    omp_set_nested(1);
            //    GDEBUG_STREAM("registerOverContainer2DFixedReference - nested openMP on ... ");
            //}
            //else
            //{
            //    omp_set_nested(0);
            //    GDEBUG_STREAM("registerOverContainer2DFixedReference - nested openMP off ... ");
            //}

            // one thread per image, capped at the processor count
            numOfThreads = (numOfImages>numOfProcs) ? numOfProcs : numOfImages;
#endif // USE_OMP

            if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD )
            {
                std::vector< std::vector<DeformationFieldType*> > deform(DIn);
                for ( ii=0; ii<DIn; ii++ )
                {
                    deformation_field_[ii].get_all_images(deform[ii]);
                }

#pragma omp parallel default(none) private(n, ii) shared(numOfImages, initial, targetImages, sourceImages, deform, warpedImages) num_threads(numOfThreads)
                {
                    DeformationFieldType* deformCurr[DIn];

#pragma omp for
                    for ( n=0; n<numOfImages; n++ )
                    {
                        // the reference frame maps to itself: copy it and store a zero deformation
                        if ( targetImages[n] == sourceImages[n] )
                        {
                            if ( warpedImages[n] != NULL )
                            {
                                *(warpedImages[n]) = *(targetImages[n]);
                            }

                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deform[ii][n]->create(targetImages[n]->get_dimensions());
                                Gadgetron::clear(*deform[ii][n]);
                            }

                            continue;
                        }

                        TargetType& target = *(targetImages[n]);
                        SourceType& source = *(sourceImages[n]);

                        for ( ii=0; ii<DIn; ii++ )
                        {
                            deformCurr[ii] = deform[ii][n];
                        }

                        registerTwoImagesDeformationField(target, source, initial, warpedImages[n], deformCurr);
                    }
                }
            }
            else if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL )
            {
                std::vector< std::vector<DeformationFieldType*> > deform(DIn);
                std::vector< std::vector<DeformationFieldType*> > deformInv(DIn);
                for ( ii=0; ii<DIn; ii++ )
                {
                    deformation_field_[ii].get_all_images(deform[ii]);
                    deformation_field_inverse_[ii].get_all_images(deformInv[ii]);
                }

#pragma omp parallel default(none) private(n, ii) shared(numOfImages, initial, targetImages, sourceImages, deform, deformInv, warpedImages) num_threads(numOfThreads)
                {
                    DeformationFieldType* deformCurr[DIn];
                    DeformationFieldType* deformInvCurr[DIn];

#pragma omp for
                    for ( n=0; n<numOfImages; n++ )
                    {
                        if ( targetImages[n] == sourceImages[n] )
                        {
                            if ( warpedImages[n] != NULL )
                            {
                                *(warpedImages[n]) = *(targetImages[n]);
                            }

                            for ( ii=0; ii<DIn; ii++ )
                            {
                                deform[ii][n]->create(targetImages[n]->get_dimensions());
                                Gadgetron::clear(*deform[ii][n]);

                                deformInv[ii][n]->create(targetImages[n]->get_dimensions());
                                Gadgetron::clear(*deformInv[ii][n]);
                            }

                            continue;
                        }

                        TargetType& target = *(targetImages[n]);
                        SourceType& source = *(sourceImages[n]);

                        for ( ii=0; ii<DIn; ii++ )
                        {
                            deformCurr[ii] = deform[ii][n];
                            deformInvCurr[ii] = deformInv[ii][n];
                        }

                        registerTwoImagesDeformationFieldBidirectional(target, source, initial, warpedImages[n], deformCurr, deformInvCurr);
                    }
                }
            }
            else if ( container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_RIGID || container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_AFFINE )
            {
                GDEBUG_STREAM("To be implemented ...");
            }

            //#ifdef USE_OMP
            //    omp_set_nested(nested);
            //#endif // USE_OMP
        }
        catch(...)
        {
            GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerOverContainer2DFixedReference(...) ... ");
            return false;
        }

        return true;
    }

    /// Register frames progressively away from each row's reference frame (two tasks per row:
    /// forward from the reference, and backward toward column 0); warped images are always computed.
    template<typename TargetType, typename SourceType, typename CoordType>
    bool hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::
    registerOverContainer2DProgressive(TargetContinerType& imageContainer, const std::vector<unsigned int>& referenceFrame)
    {
        try
        {
            bool warped = true;
            GADGET_CHECK_RETURN_FALSE(this->initialize(imageContainer, warped));

            long long row = (long long)imageContainer.rows();
            std::vector<size_t> col = imageContainer.cols();

            GADGET_CHECK_RETURN_FALSE(referenceFrame.size() == col.size());

            unsigned int ii;
            long long n;
            long long r, c;

            // for every row, two registration tasks can be formatted
            long long numOfTasks = (long long)(2*row);
            GDEBUG_STREAM("hoImageRegContainer2DRegistration<...>::registerOverContainer2DProgressive(...), numOfTasks : " << numOfTasks);

            std::vector< std::vector<TargetType*> > regImages(numOfTasks);
            std::vector< std::vector<TargetType*> > warpedImages(numOfTasks);

            std::vector< std::vector< std::vector<DeformationFieldType*> > > deform(DIn);
            std::vector< std::vector< std::vector<DeformationFieldType*> > > deformInv(DIn);
            for ( ii=0; ii<DIn; ii++ )
            {
                deform[ii].resize(numOfTasks);
                deformInv[ii].resize(numOfTasks);
            }

            for ( r=0; r<row; r++ )
            {
                unsigned int refFrame = referenceFrame[r];

                regImages[2*r].resize(col[r]-refFrame);
                regImages[2*r+1].resize(1+refFrame);

                warpedImages[2*r].resize(col[r]-refFrame);
                warpedImages[2*r+1].resize(1+refFrame);

                // copy over the reference frame
                warped_container_(r, refFrame) = imageContainer(r, refFrame);

                if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD )
                {
                    for ( ii=0; ii<DIn; ii++ )
                    {
                        deformation_field_[ii](r, refFrame).create(imageContainer(r, refFrame).get_dimensions());
                        Gadgetron::clear(deformation_field_[ii](r, refFrame));
                    }
                }

                if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL )
                {
                    for ( ii=0; ii<DIn; ii++ )
                    {
                        deformation_field_[ii](r,
refFrame).create(imageContainer(r, refFrame).get_dimensions()); Gadgetron::clear(deformation_field_[ii](r, refFrame)); deformation_field_inverse_[ii](r, refFrame).create(imageContainer(r, refFrame).get_dimensions()); Gadgetron::clear(deformation_field_inverse_[ii](r, refFrame)); } } // task one for ( c=refFrame; c<(long long)col[r]; c++ ) { regImages[2*r][c-refFrame] = &(imageContainer(r, c)); warpedImages[2*r][c-refFrame] = &(warped_container_(r, c)); } // task two for ( c=refFrame; c>=0; c-- ) { regImages[2*r+1][refFrame-c] = &(imageContainer(r, c)); warpedImages[2*r+1][refFrame-c] = &(warped_container_(r, c)); } for ( ii=0; ii<DIn; ii++ ) { if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD ) { deform[ii][2*r].resize(col[r]-refFrame); deform[ii][2*r+1].resize(1+refFrame); // task one for ( c=refFrame; c<(long long)col[r]; c++ ) { deform[ii][2*r][c-refFrame] = &(deformation_field_[ii](r, c)); } // task two for ( c=refFrame; c>=0; c-- ) { deform[ii][2*r+1][refFrame-c] = &(deformation_field_[ii](r, c)); } } if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL ) { deform[ii][2*r].resize(col[r]-refFrame); deform[ii][2*r+1].resize(1+refFrame); deformInv[ii][2*r].resize(col[r]-refFrame); deformInv[ii][2*r+1].resize(1+refFrame); // task one for ( c=refFrame; c<(long long)col[r]; c++ ) { deform[ii][2*r][c-refFrame] = &(deformation_field_[ii](r, c)); deformInv[ii][2*r][c-refFrame] = &(deformation_field_inverse_[ii](r, c)); } // task two for ( c=refFrame; c>=0; c-- ) { deform[ii][2*r+1][refFrame-c] = &(deformation_field_[ii](r, c)); deformInv[ii][2*r+1][refFrame-c] = &(deformation_field_inverse_[ii](r, c)); } } } } if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD ) { bool initial = false; #pragma omp parallel default(none) private(n, ii) shared(numOfTasks, initial, regImages, warpedImages, deform) { DeformationFieldType* deformCurr[DIn]; #pragma omp for for ( 
n=0; n<numOfTasks; n++ ) { size_t numOfImages = regImages[n].size(); // no need to copy the refrence frame to warped size_t k; for ( k=1; k<numOfImages; k++ ) { TargetType& target = *(warpedImages[n][k-1]); SourceType& source = *(regImages[n][k]); for ( ii=0; ii<DIn; ii++ ) { deformCurr[ii] = deform[ii][n][k]; } registerTwoImagesDeformationField(target, source, initial, warpedImages[n][k], deformCurr); } } } } else if ( container_reg_transformation_ == GT_IMAGE_REG_TRANSFORMATION_DEFORMATION_FIELD_BIDIRECTIONAL ) { bool initial = false; #pragma omp parallel default(none) private(n, ii) shared(numOfTasks, initial, regImages, warpedImages, deform, deformInv) { DeformationFieldType* deformCurr[DIn]; DeformationFieldType* deformInvCurr[DIn]; #pragma omp for for ( n=0; n<numOfTasks; n++ ) { size_t numOfImages = regImages[n].size(); size_t k; for ( k=1; k<numOfImages; k++ ) { TargetType& target = *(warpedImages[n][k-1]); SourceType& source = *(regImages[n][k]); for ( ii=0; ii<DIn; ii++ ) { deformCurr[ii] = deform[ii][n][k]; deformInvCurr[ii] = deformInv[ii][n][k]; } registerTwoImagesDeformationFieldBidirectional(target, source, initial, warpedImages[n][k], deformCurr, deformInvCurr); } } } } else if ( container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_RIGID || container_reg_transformation_==GT_IMAGE_REG_TRANSFORMATION_AFFINE ) { GDEBUG_STREAM("To be implemented ..."); } } catch(...) { GERROR_STREAM("Error happened in hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::registerOverContainer2DProgressive(...) ... 
"); return false; } return true; } template<typename TargetType, typename SourceType, typename CoordType> void hoImageRegContainer2DRegistration<TargetType, SourceType, CoordType>::print(std::ostream& os) const { using namespace std; unsigned int ii, jj; os << "--------------Gagdgetron image registration container 2D -------------" << endl; os << "Input dimension is : " << DIn << endl; os << "Output dimension is : " << DOut << endl; std::string elemTypeName = std::string(typeid(ValueType).name()); os << "Image data type is : " << elemTypeName << std::endl; elemTypeName = std::string(typeid(CoordType).name()); os << "Transformation coordinate data type is : " << elemTypeName << std::endl; os << "Whether to apply in_FOV constraint : " << apply_in_FOV_constraint_ << std::endl; os << "Whether to apply divergence free constraint : " << apply_divergence_free_constraint_ << std::endl; os << "Whether to perform world coordinate registration is : " << use_world_coordinates_ << std::endl; os << "Number of resolution pyramid levels is : " << resolution_pyramid_levels_ << std::endl; os << "------------" << std::endl; os << "Number of iterations is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << max_iter_num_pyramid_level_[ii] << std::endl; } os << "------------" << std::endl; os << "Image dissimilarity is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << getDissimilarityName(dissimilarity_type_) << std::endl; } os << "------------" << std::endl; os << "Threshold for dissimilarity is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << dissimilarity_thres_pyramid_level_[ii] << std::endl; } os << "------------" << std::endl; os << "Number of search size division is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << div_num_pyramid_level_[ii] << std::endl; } os << 
"------------" << std::endl; if ( regularization_hilbert_strength_world_coordinate_ ) { os << "Regularization strength is in the unit of physical metric, e.g. mm ... "; } else { os << "Regularization strength is in the unit of image pixel size ... "; } os << "Regularization strength for every pyramid level is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - [ "; for( jj=0; jj<DIn; jj++ ) { os << regularization_hilbert_strength_pyramid_level_[ii][jj] << " "; } os << " ] " << std::endl; } os << "------------" << std::endl; os << "Boundary handler and interpolator type for warper is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << getBoundaryHandlerName(boundary_handler_type_warper_[ii]) << " - " << getInterpolatorName(interp_type_warper_[ii]) << std::endl; } os << "------------" << std::endl; os << "Number of iterations to improve the estimation of the inverse transform is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << inverse_deform_enforce_iter_pyramid_level_[ii] << std::endl; } os << "------------" << std::endl; os << "Weight to update the estimation of the inverse transform is : " << std::endl; for ( ii=0; ii<resolution_pyramid_levels_; ii++ ) { os << " Level " << ii << " - " << inverse_deform_enforce_weight_pyramid_level_[ii] << std::endl; } os << "------------" << std::endl; } } #endif // hoImageRegContainer2DRegistration_H_
resize-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file resize-inl.h * \brief image resize operator using opencv and only support bilinear resize * \author Jake Lee */ #ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_ #define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_ #include <mxnet/base.h> #include <vector> #include "../mxnet_op.h" #include "../operator_common.h" #include "image_utils.h" #if MXNET_USE_OPENCV #include <opencv2/opencv.hpp> #endif // MXNET_USE_OPENCV namespace mxnet { namespace op { namespace image { using namespace mshadow; #if MXNET_USE_CUDA template<typename DType, typename T, typename Acctype> void ResizeImplCUDA(Stream<gpu> *s, const T input, const T output); #endif // MXNET_USE_CUDA struct ResizeParam : public dmlc::Parameter<ResizeParam> { mxnet::Tuple<int> size; bool keep_ratio; int interp; DMLC_DECLARE_PARAMETER(ResizeParam) { DMLC_DECLARE_FIELD(size) .set_default(mxnet::Tuple<int>()) .describe("Size of new image. Could be (width, height) or (size)"); DMLC_DECLARE_FIELD(keep_ratio) .describe("Whether to resize the short edge or both edges to `size`, " "if size is give as an integer.") .set_default(false); DMLC_DECLARE_FIELD(interp) .set_default(1) .describe("Interpolation method for resizing. 
By default uses bilinear interpolation" "Options are INTER_NEAREST - a nearest-neighbor interpolation" "INTER_LINEAR - a bilinear interpolation" "INTER_AREA - resampling using pixel area relation" "INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood" "INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood" "Note that the GPU version only support bilinear interpolation(1)"); } }; // handle the keep ratio param inline SizeParam GetHeightAndWidth(int data_h, int data_w, const ResizeParam& param) { CHECK((param.size.ndim() == 1) || (param.size.ndim() == 2)) << "Input size dimension must be 1 or 2, but got " << param.size.ndim(); int resized_h; int resized_w; if (param.size.ndim() == 1) { CHECK_GT(param.size[0], 0) << "Input size should be greater than 0, but got " << param.size[0]; if (!param.keep_ratio) { resized_h = param.size[0]; resized_w = param.size[0]; } else { if (data_h > data_w) { resized_w = param.size[0]; resized_h = static_cast<int>(data_h * resized_w / data_w); } else { resized_h = param.size[0]; resized_w = static_cast<int>(data_w * resized_h / data_h); } } } else { CHECK_GT(param.size[0], 0) << "Input width should be greater than 0, but got " << param.size[0]; CHECK_GT(param.size[1], 0) << "Input height should be greater than 0, but got " << param.size[1]; resized_h = param.size[1]; resized_w = param.size[0]; } return SizeParam(resized_h, resized_w); } inline bool ResizeShapeImpl(const ResizeParam& param, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { // input attrs should only be (h, w, c) or (n, h, w, c) CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U)) << "Input image dimension should be 3 or 4 but got " << in_attrs->at(0).ndim(); const auto& ishape = (*in_attrs)[0]; SizeParam size; if (ishape.ndim() == 3) { size = GetHeightAndWidth(ishape[H], ishape[W], param); SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]})); } else { size = 
GetHeightAndWidth(ishape[kH], ishape[kW], param); SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]})); } return true; } inline bool ResizeShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector *in_attrs, mxnet::ShapeVector *out_attrs) { const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed); return ResizeShapeImpl(param, in_attrs, out_attrs); } inline void ResizeImpl(const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs, const int height, const int width, const int interp, const int input_index = 0, const int output_index = 0) { #if MXNET_USE_OPENCV CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16"; CHECK((inputs[0].type_flag_ != mshadow::kInt32) || (inputs[0].type_flag_ != mshadow::kInt64)) << "opencv resize doesn't support int32, int64"; // mapping to opencv matrix element type according to channel const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S}; if (inputs[0].ndim() == 3) { const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]); cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_); cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_); cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp); CHECK(!dst.empty()); CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_); } else { const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]); MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, { cv::Mat buf(inputs[0].shape_[kH], inputs[0].shape_[kW], cv_type, inputs[0].dptr<DType>() + input_index); cv::Mat dst(outputs[0].shape_[kH], outputs[0].shape_[kW], cv_type, outputs[0].dptr<DType>() + output_index); cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp); CHECK(!dst.empty()); CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index); }); } #else LOG(FATAL) << "Build with USE_OPENCV=1 for image resize 
operator."; #endif // MXNET_USE_OPENCV } template <typename xpu> inline void ResizeImplWrapper(const ResizeParam& param, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<TBlob> &outputs) { SizeParam size; if (std::is_same<xpu, gpu>::value) { #if MXNET_USE_CUDA CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU."; mshadow::Stream<gpu> *s = ctx.get_stream<gpu>(); MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, { if (inputs[0].ndim() == 3) { Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s); Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s); ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output); } else { Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s); Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s); ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output); } }); #endif // MXNET_USE_CUDA } else if (inputs[0].ndim() == 3) { size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param); ResizeImpl(inputs, outputs, size.height, size.width, param.interp); } else { size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param); const auto batch_size = inputs[0].shape_[N]; const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC]; const auto output_step = size.height * size.width * inputs[0].shape_[kC]; #pragma omp parallel for for (auto i = 0; i < batch_size; ++i) { ResizeImpl(inputs, outputs, size.height, size.width, param.interp, i * input_step, i * output_step); } } } template <typename xpu> inline void Resize(const nnvm::NodeAttrs &attrs, const OpContext &ctx, const std::vector<TBlob> &inputs, const std::vector<OpReqType> &req, const std::vector<TBlob> &outputs) { CHECK_EQ(outputs.size(), 1U); const ResizeParam& param = nnvm::get<ResizeParam>(attrs.parsed); ResizeImplWrapper<xpu>(param, ctx, inputs, outputs); } } // namespace image } // namespace op } // namespace mxnet #endif // 
MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
shingles.c
/*
 * Simple Stencil example
 * Main program example
 *
 * Brian J Gravelle
 * gravelle@cs.uoregon.edu
 *
 */

#ifdef USE_CALI
#include <caliper/cali.h>
#endif
#ifdef USE_LIKWID
#include <likwid-marker.h>
#endif

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>

//KERNEL
#define TRUE  1
#define FALSE 0

/* Benchmark kernel (defined in a separate translation unit); reports its
 * per-element flop/byte/access characteristics through the out-pointers. */
void kernel(double *__restrict__ data, size_t nsize, size_t ntrials,
            size_t *flops, size_t *bytes_per_elem, size_t *mem_accesses_per_elem);

/* Parsed command-line configuration. */
struct Inputs {
  size_t num_thr;    /* number of OpenMP threads              */
  size_t nsize;      /* doubles per thread                    */
  size_t nreps;      /* kernel repetitions                    */
  char   print_info; /* TRUE for verbose output, FALSE for raw gflops only */
};

void get_input(int argc, char **argv, struct Inputs* input);

/* C11 requires aligned_alloc's size to be a multiple of the alignment;
 * round the request up to the next multiple of 64. */
static size_t round_up_64(size_t nbytes) {
  return (nbytes + 63u) & ~(size_t)63u;
}

// main function
int main(int argc, char **argv) {

#ifdef USE_LIKWID
  LIKWID_MARKER_INIT;
#endif

  int err = FALSE;

  double wall_tot_start,  wall_tot_end;
  double wall_init_start, wall_init_end;
  double wall_comp_start, wall_comp_end;
  double wall_free_start, wall_free_end;

  struct Inputs input;
  size_t ntrials = 1;
  size_t flops = 0;
  size_t tot_flops = 0;
  size_t bytes_per_elem = 0;
  size_t mem_accesses_per_elem = 0;
  double **data;
  double ai = 0.0;
  double gflops = 0.0;

  get_input(argc, argv, &input);
  omp_set_num_threads(input.num_thr);

#ifdef USE_CALI
  cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT,
                                                CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#pragma omp parallel
  {
    cali_set_int(thread_attr, omp_get_thread_num());
  }
#endif

  wall_tot_start = wall_init_start = omp_get_wtime();

  /* One 64-byte-aligned working array per thread. FIX: check every
   * allocation instead of dereferencing a possible NULL. */
  data = aligned_alloc(64, round_up_64(input.num_thr * sizeof(double *)));
  if (data == NULL) {
    fprintf(stderr, "allocation failure\n");
    return TRUE;
  }
  for (size_t tid = 0; tid < input.num_thr; tid++) {
    data[tid] = aligned_alloc(64, round_up_64(input.nsize * sizeof(double)));
    if (data[tid] == NULL) {
      fprintf(stderr, "allocation failure\n");
      return TRUE;
    }
    for (size_t j = 0; j < input.nsize; j++) {
      data[tid][j] = 0.0001;
    }
  }

  if (input.print_info) printf("Running benchmark.......\n");
  fflush(stdout);

  wall_comp_start = wall_init_end = omp_get_wtime();

#pragma omp parallel
  {
#ifdef USE_CALI
    CALI_MARK_BEGIN("kernel");
#endif
    size_t tid = omp_get_thread_num();
    /* FIX: the original passed the shared counters to every thread,
     * a data race. Each thread reports into privates; one thread
     * publishes (all threads are assumed to report identical values). */
    size_t t_flops = 0, t_bytes = 0, t_accesses = 0;
    kernel(data[tid], input.nsize, input.nreps, &t_flops, &t_bytes, &t_accesses);
#pragma omp single
    {
      flops = t_flops;
      bytes_per_elem = t_bytes;
      mem_accesses_per_elem = t_accesses;
    }
#ifdef USE_CALI
    CALI_MARK_END("kernel");
#endif
  }

  wall_free_start = wall_comp_end = omp_get_wtime();

  /* FIX: free the per-thread arrays too; the original leaked them. */
  for (size_t tid = 0; tid < input.num_thr; tid++) {
    free(data[tid]);
  }
  free(data);

  wall_tot_end = wall_free_end = omp_get_wtime();

  ai = ((double)flops) / ((double)bytes_per_elem * (double)mem_accesses_per_elem);
  gflops = ((double)flops * (double)input.num_thr * (double)input.nsize * (double)input.nreps)
           / (wall_comp_end - wall_comp_start);
  tot_flops = flops * input.nsize;  //*input.nreps;

  if (input.print_info) {
    /* FIX: size_t arguments require %zu; %d was undefined behavior. */
    printf("\nRan new Shingle with \n\
doubles per thr = %zu \n\
flops per iter = %zu \n\
flops per thr = %zu \n\
Arith Intensity = %f \n\
rep count = %zu \n\
thread count = %zu \n",
           input.nsize, flops, tot_flops, ai, input.nreps, input.num_thr);
    printf("init time: %fs\n", (wall_init_end - wall_init_start));
    printf("free time: %fs\n", (wall_free_end - wall_free_start));
    printf("\ncomputation time: %fs\n", (wall_comp_end - wall_comp_start));
    printf("computation rate: %e FLOP/s\n", gflops);
  } else {
    printf("%e\n", gflops);
  }

#ifdef USE_LIKWID
  LIKWID_MARKER_CLOSE;
#endif

  return err;
}

/* Parse command-line options into *input; defaults target one L2-resident
 * shingle per core on Skylake. Exits on -h/-a or on malformed options. */
void get_input(int argc, char **argv, struct Inputs* input) {
  int i = 1;

  input->num_thr    = 4;
  input->nsize      = 308224;  // 2465792 bytes (308224 doubles) per core on L2 and 3 skylake
  input->nreps      = 5;
  input->print_info = TRUE;

  for (i = 1; i < argc; i++) {
    if ( !(strcmp("-h", argv[i])) || !(strcmp("--help", argv[i])) ) {
      printf("shingles options:\n");
      printf("--threads [] -t [] .......... for number of threads\n");
      printf("--size [] -s [] ............. for number of doubles to use per thread\n");
      printf("--repititions [] -r [] ...... for number of repititions to perform\n");
      printf("--quiet -q .................. to remove output other than computation time\n");
      exit(1);
    }
    if ( !(strcmp("-t", argv[i])) || !(strcmp("--threads", argv[i])) ) {
      if (++i < argc) {
        input->num_thr = atoi(argv[i]);
      } else {
        printf("Please include a thread count with that option\n");
        exit(1);
      }
    }
    if ( !(strcmp("-s", argv[i])) || !(strcmp("--size", argv[i])) ) {
      if (++i < argc) {
        input->nsize = atoi(argv[i]);
      } else {
        printf("Please include a number of doubles with that option\n");
        exit(1);
      }
    }
    if ( !(strcmp("-r", argv[i])) || !(strcmp("--repititions", argv[i])) ) {
      if (++i < argc) {
        input->nreps = atoi(argv[i]);
      } else {
        printf("Please include a number of reps with that option\n");
        exit(1);
      }
    }
    if ( !(strcmp("-q", argv[i])) || !(strcmp("--quiet", argv[i])) ) {
      input->print_info = FALSE;
    }
  }
}
macadam.c
// converted some old code to use our cie1931 arrays
//
// this will precompute a map suitable to reconstruct MacAdam-style box spectra
// from tristimulus input.
// the map contains three numbers per pixel: maximum brightness (X+Y+Z),
// lambda0 and lambda1. the two wavelengths are the locations of the rising
// and the falling edge, respectively. if the falling edge comes before the
// rising one, the spectrum is a dip instead of a peak.
#define CIE_SAMPLES 95
#define CIE_LAMBDA_MIN 360.0
#define CIE_LAMBDA_MAX 830.0
#define CIE_FINE_SAMPLES ((CIE_SAMPLES - 1) * 3 + 1)
#include "details/cie1931.h"
#include "core/inpaint.h"
#include "core/clip.h"
#include "core/half.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>

// Precompute the MacAdam box-spectrum lookup table and write it to
// "macadam.lut" (half-float, one channel). Pipeline: rasterize box spectra
// into an xy-chromaticity map, inpaint holes, clear out-of-gamut texels,
// smooth, then serialize.
int main(int argc, char *argv[])
{
  const int max_l = CIE_SAMPLES*2;
  int res = 1024;                 // output map is res x res texels, 4 floats each
  // NOTE(review): calloc's (nmemb, size) arguments are swapped here
  // (size passed first); the product is the same so the allocation size
  // is still correct. Return value is not checked — TODO confirm OOM policy.
  float *buf = calloc(sizeof(float), 4*res*res);
  // sub-steps per 5nm wavelength bin; double literal truncates to int 8
  int incres = 8.0;//64.0;
  // enumerate all possible box spectra in the sense of [MacAdam 1935],
  // all wavelengths l: l0 <= l <= l1 are s(l) = 1, 0 else:
  // NOTE(review): threads may rasterize different spectra into the same
  // texel of the shared buf without synchronization (last-writer-wins
  // since t0=0, t1=1 below) — presumably tolerated; verify intent.
#pragma omp parallel for schedule(dynamic) default(shared)
  for(int iw0=0;iw0<=incres*(max_l/2-1);iw0++)
  {
    for(int iw1=iw0+1;iw1<=incres*(max_l-2);iw1++)
    {
      // fractional edge positions in 5nm units
      const float w0 = iw0/(float)incres;
      const float w1 = iw1/(float)incres;
      // compute xy chromaticities:
      // const int l0 = w0, l1 = w1;
      // const float f0 = w0-l0, f1 = w1-l1;
      const float lambda0 = CIE_LAMBDA_MIN + w0 * 5.0;
      // wrap the falling edge past the red end back to the blue end,
      // turning the peak into a dip
      const float lambda1 = CIE_LAMBDA_MIN + w1 * 5.0 > CIE_LAMBDA_MAX ?
        CIE_LAMBDA_MIN + (w1 - CIE_SAMPLES + 1)*5.0 :
        CIE_LAMBDA_MIN + w1 * 5.0;
      // integrate the box spectrum against the CIE 1931 colour matching
      // functions (5x oversampled; cie_* tables are per-5nm bins)
      double X, Y, Z;
      X = Y = Z = 0.0;
      for(int l=0;l<5*CIE_SAMPLES;l++)
      {
        const float ll = l / (5.0f*CIE_SAMPLES - 1.0f);
        const float lambda = CIE_LAMBDA_MIN * (1.0f-ll) + CIE_LAMBDA_MAX * ll;
        float p = 0.0f;
        if(lambda0 < lambda1)
        { // peak
          if(lambda > lambda0 && lambda <= lambda1) p = 1.0f;
        }
        else
        { // dip
          if(lambda <= lambda1 || lambda > lambda0) p = 1.0f;
        }
        X += p * cie_x[l/5] * 1.0f/106.89;
        Y += p * cie_y[l/5] * 1.0f/106.89;
        Z += p * cie_z[l/5] * 1.0f/106.89;
      }
      const float b = X+Y+Z;      // brightness = X+Y+Z
      const float x = X/b;        // chromaticity coordinates
      const float y = Y/b;
      // rasterize into map
      if(b > 1e-4f && x > 0 && y > 0)
      {
        const int i = x*res+0.5f, j = y*res+0.5f;
        if(i>=0&&i<res&&j>=0&&j<res)
        {
          float *v = buf + 4*(j*res+i);
          // running-average variant kept for reference; current code
          // simply overwrites (t0=0, t1=1) and counts hits in v[3]
          // const float n = v[3];
          // const float t0 = n/(n+1.0), t1 = 1.0/(n+1.0);
          const float t0 = 0.0f, t1 = 1.0f;
          v[0] = t0*v[0] + t1*b;
          v[1] = t0*v[1] + t1*lambda0;
          v[2] = t0*v[2] + t1*lambda1;
          v[3] ++ ;
        }
      }
    }
  }
  // inpaint/hole filling
  dt_inpaint_buf_t inpaint_buf = {
    .dat = buf,
    .wd = res,
    .ht = res,
    .cpp = 4,
  };
  dt_inpaint(&inpaint_buf);
  // clear out of gamut values again
  for(int j=0;j<res;j++) for(int i=0;i<res;i++)
    if(dt_spectrum_outside( (i+.5) / (float)res, (j+.5) / (float)res))
      for(int c=0;c<3;c++) buf[4*(j*res+i)+c] = 0.0f;
  // blur 5x5 to smooth over cmf resolution
  // (binomial weights, stride-2 taps over a 9x9 footprint; only the
  // brightness channel buf[4*..+0] is smoothed into the 1-channel output)
  float *smooth = calloc(sizeof(float), res*res);
  for(int j=0;j<res;j++) for(int i=0;i<res;i++)
  {
    const float wg[] = {1.0f, 4.0f, 6.0f, 4.0f, 1.0f};
    float weight = 0.0f;
    // for(int jj=-2;jj<=2;jj++) for(int ii=-2;ii<=2;ii++)
    for(int jj=-4;jj<=4;jj+=2) for(int ii=-4;ii<=4;ii+=2) // even smoother
    {
      if(j+jj >= 0 && j+jj < res && i+ii >= 0 && i+ii < res)
      {
        // float w = wg[jj+2]*wg[ii+2];
        float w = wg[jj/2+2]*wg[ii/2+2];
        smooth[j*res+i] += w * buf[4*((j+jj)*res+i+ii)];
        weight += w;
      }
    }
    if(weight > 0.0f) smooth[j*res+i] /= weight;
  }
  // write 1 channel half lut:
  uint32_t size = sizeof(uint16_t)*res*res;
  uint16_t *b16 = malloc(size);
  for(int k=0;k<res*res;k++) b16[k] = float_to_half(smooth[k]);
  // on-disk header for macadam.lut (magic 1234, version 2, 1 half channel)
  typedef struct header_t
  {
    uint32_t magic;
    uint16_t version;
    uint8_t channels;
    uint8_t datatype;
    uint32_t wd;
    uint32_t ht;
  } header_t;
  header_t head = (header_t) {
    .magic = 1234,
    .version = 2,
    .channels = 1,
    .datatype = 0,
    .wd = res,
    .ht = res,
  };
  // NOTE(review): fwrite return values are ignored — a short write would
  // produce a truncated lut silently.
  FILE *f = fopen("macadam.lut", "wb");
  if(f)
  {
    fwrite(&head, sizeof(head), 1, f);
    fwrite(b16, size, 1, f);
    fclose(f);
  }
#if 0
  // debug, can look at this with eu:
  FILE *pfm = fopen("macadam.pfm", "wb");
  if(pfm)
  {
    fprintf(pfm, "PF\n%d %d\n-1.0\n", res, res);
    for(int k=0;k<res*res;k++)
      for(int c=0;c<3;c++)
        fwrite(smooth+k, sizeof(float), 1, pfm);
    fclose(pfm);
  }
#endif
  free(b16);
  free(smooth);
  // buf is intentionally not freed; the process exits here anyway
  exit(0);
}
pi_omp_teams.c
/* This program will numerically compute the integral of 4/(1+x*x) from 0 to 1. History: Written by Tim Mattson, 11/99. Configure eclipse based: https://medium.com/swlh/openmp-on-ubuntu-1145355eeb2 */ #include <omp.h> #include <stdio.h> #include <unistd.h> #include <stdint.h> #include <stdlib.h> // Declare variables uint32_t num_steps_teams = 100000000; uint32_t teams_number = 2; uint32_t max_num_threads = 2; double step; // Constant for help on usage static char usage[] = "usage: %s [-h] [-a] -t teams_number -n thread_number\n" "-t Maximum number of teams.\n" "-n Maximum number of threads.\n" "-a displays the information of the authors of the program.\n" "-h displays the usage message to let the user know how to execute the application.\n"; // Internal functions declarations double pi_opm_teams(uint32_t num_steps, uint32_t teams_number, uint32_t max_num_threads); void check_required_inputs(int i_flag, int o_flag, char *prog_name); // Main code int main(int argc, char **argv) { // Use flags below to tell if the required arguments were provided int t_flag = 0; int n_flag = 0; // To save number of threads to use uint32_t max_num_threads; // To save number of teams to use uint32_t teams_number; int c; while ((c = getopt(argc, argv, "hat:n:")) != -1) switch (c){ case 't': t_flag = 1; teams_number = atoi(optarg); break; case 'n': n_flag = 1; max_num_threads = atoi(optarg); break; case 'h': fprintf(stderr, usage, argv[0]); exit(1); break; case 'a': printf("Authors: agomez and rcespedes\n"); exit(1); break; case ':': break; case '?': fprintf (stderr, "Unknown option `-%c'.\n", optopt); return 1; default: abort(); } // Check required arguments were provided check_required_inputs(t_flag, n_flag, argv[0]); // number of teams to run num_steps_teams = 100000000; printf("Configured to use -Teams number: %d | -Max thread number: %d\n",teams_number, max_num_threads); pi_opm_teams(num_steps_teams, 8, 8); return 0; } // Function to check user provided the required arguments void 
check_required_inputs(int t_flag, int n_flag, char *prog_name){ // Check required arguments were provided. Print error and abort otherwise if (!t_flag){ fprintf(stderr, "%s: missing -t option\n", prog_name); fprintf(stderr, usage, prog_name); exit(1); } if (!n_flag){ fprintf(stderr, "%s: missing -n option\n", prog_name); fprintf(stderr, usage, prog_name); exit(1); } } double pi_opm_teams(uint32_t num_steps, uint32_t teams_number, uint32_t max_num_threads){ // Declare Internal Variables uint32_t i, used_teams, used_threads = 0; double x, pi, sum = 0.0; double start_time, run_time; // Calcualte step size step = 1.0 / (double)num_steps; // get start time start_time = omp_get_wtime(); // Make for loop parallel with the teams construct and a thread_limit // Make for loop parallel with the teams construct // Set reduction pragma to optimize sum operation and make the x variable private between threads #pragma omp teams num_teams(teams_number) thread_limit(max_num_threads) used_teams = omp_get_num_teams(); used_threads = omp_get_num_threads(); // the reduction operation makes the variable private and causes the system to perform a reduction optimization at the end of the parallel region. Also, uses the distribute construct to evenly distribute the loop executions among threads #pragma omp distribute parallel for reduction(+:sum) private (x) for (i = 1; i <= num_steps; i++) { x = (i - 0.5) * step; sum = sum + 4.0 / (1.0 + x * x); } pi = step * sum; run_time = omp_get_wtime() - start_time; printf("Current settings -Teams used number: %d | -Threads used: %d \n",used_teams, used_threads); printf("pi teams implementation with %d steps is %lf in %.12lf seconds\n", num_steps, pi, run_time); return pi; }
threadprivate2.c
#include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif int counter=0; #ifdef _OPENMP #pragma omp threadprivate(counter) #endif int main(void) { int i; #pragma omp parallel for for(i=0;i<10000;i++) counter++; #pragma omp parallel for for(i=0;i<10000;i++) counter+=3; #pragma omp parallel printf("counter=%d\n",counter); return 0; }
VolumetricConvolutionMM.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/VolumetricConvolutionMM.c"
#else

#include <ATen/div_rtn.h>

// Batch sizes above this threshold get an OpenMP parallel-for over frames.
#define CONV3D_OMP_THRESHOLD 20

// Validates input/weight/bias/gradOutput geometry for the 3D convolution and
// raises a THError on any mismatch. weight_nullable permits a NULL weight
// (used by accGradParameters when only gradBias is requested).
static void inline THNN_(VolumetricConvolutionMM_shapeCheck)(
                         THNNState *state,
                         THTensor *input,
                         THTensor *gradOutput,
                         THTensor *weight,
                         THTensor *bias,
                         int kT, int kW, int kH,
                         int dT, int dW, int dH,
                         int pT, int pW, int pH,
                         int weight_nullable) {
  THNN_ARGCHECK(!input->is_empty() && (input->dim() == 4 || input->dim() == 5), 2, input,
                "non-empty 4D or 5D (batch mode) tensor expected for input, but got: %s");
  THArgCheck(kT > 0 && kW > 0 && kH > 0, 8,
             "kernel size should be greater than zero, but got kT: %d kH: %d kW: %d", kT, kH, kW);
  THArgCheck(dT > 0 && dW > 0 && dH > 0, 11,
             "stride should be greater than zero, but got dT: %d dH: %d dW: %d", dT, dH, dW);

  if (weight != NULL) {
    THNN_ARGCHECK(!weight->is_empty() && (weight->dim() == 2 || weight->dim() == 5), 5, weight,
                  "non-empty 2D or 5D weight tensor expected, but got: %s");
    if (bias != NULL) {
      THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size(0));
    }
  } else if (!weight_nullable) {
    THError("weight tensor is expected to be non-nullable");
  }

  int ndim = input->dim();
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  // In batch (5D) mode all spatial dims shift right by one.
  if (ndim == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  int64_t inputDepth;
  int64_t inputHeight;
  int64_t inputWidth;

  int64_t exactInputDepth;
  int64_t exactInputHeight;
  int64_t exactInputWidth;
  int64_t outputDepth;
  int64_t outputHeight;
  int64_t outputWidth;

  inputDepth = input->size(dimt);
  inputHeight  = input->size(dimh);
  inputWidth   = input->size(dimw);

  // Padded extents; the kernel must fit inside them.
  exactInputDepth = inputDepth + 2*pT;
  exactInputHeight = inputHeight + 2*pH;
  exactInputWidth = inputWidth + 2*pW;

  if (exactInputDepth < kT || exactInputHeight < kH || exactInputWidth < kW) {
    THError("Calculated padded input size per channel: (%ld x %ld x %ld). "
            "Kernel size: (%d x %d x %d). Kernel size can't be greater than actual input size",
            exactInputDepth, exactInputHeight, exactInputWidth, kT, kH, kW);
  }

  // div_rtn rounds toward negative infinity, matching conv output-size math.
  outputDepth  = div_rtn<int64_t>(exactInputDepth - kT, dT) + 1;
  outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1;
  outputWidth  = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1;

  if (outputDepth < 1 || outputWidth < 1 || outputHeight < 1) {
    THError("Given input size per channel: (%ld x %ld x %ld). "
            "Calculated output size per channel: (%ld x %ld x %ld). Output size is too small",
            inputDepth, inputHeight, inputWidth, outputDepth, outputHeight, outputWidth);
  }

  if (weight != NULL) {
    // 2D (viewed) weight packs nInputPlane*kT*kH*kW into dim 1; undo that.
    int64_t nInputPlane = weight->size(1);
    if (weight->dim() == 2) {
      nInputPlane /= (kT * kH * kW);
    }
    THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
  }

  if (gradOutput != NULL) {
    if (weight != NULL) {
      int64_t nOutputPlane = weight->size(0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    } else if (bias != NULL) {
      int64_t nOutputPlane = THTensor_sizeLegacyNoScalars(bias, 0);
      THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
    }
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, outputDepth);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
    THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
  }
}

// Returns a contiguous 2D view [s1, s2] of a 5D weight tensor (or the
// contiguous weight itself if already 2D). Caller owns the returned ref.
static THTensor* THNN_(newViewWeight)(THTensor *weight)
{
  weight = THTensor_(newContiguous)(weight);
  if (weight->dim() == 5) {
    int64_t s1 = weight->size(0);
    int64_t s2 = weight->size(1) * weight->size(2) * weight->size(3) * weight->size(4);
    THTensor *old_weight = weight;
    weight = THTensor_(newWithStorage2d)(THTensor_getStoragePtr(weight), weight->storage_offset(),
                                         s1, -1, s2, -1);
    c10::raw::intrusive_ptr::decref(old_weight);
  }
  return weight;
}

// Kernel for fast unfold+copy
// Borrowed from Theano
// Authors: Arjun Jain, Frédéric Bastien, Jan Schlüter, Nicolas Ballas
//
// col2vol-style accumulation: for every input element, sums all finput
// (column-buffer) entries that map onto it. Under OpenMP the flat input index
// range is split into one contiguous segment per thread; each thread derives
// its starting (c, d, h, w) coordinates from its segment offset and then walks
// them incrementally to avoid per-element div/mod.
static void THNN_(unfolded_acc_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

#ifdef _OPENMP
  // Don't nest a parallel region if a caller already parallelized over frames.
  int inOmp = omp_in_parallel();
  #pragma omp parallel if (!inOmp) firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, nInputPlane, inputHeight, inputWidth, inputDepth)
  {
    size_t num_threads = omp_get_num_threads();
    size_t tid = omp_get_thread_num();
    int64_t n = nInputPlane * inputHeight * inputWidth * inputDepth;
    int64_t seg_len_tmp = n / num_threads;
    int64_t line_index_offset = tid * seg_len_tmp;
    // Last thread picks up the remainder of the division.
    int64_t line_seg_len = (tid == num_threads - 1)? (n-line_index_offset) : seg_len_tmp;

    // Decompose the flat segment start into padded (c, d, h, w) coordinates.
    int64_t w = line_index_offset % inputWidth + pW;
    int64_t h_index = line_index_offset / inputWidth;
    int64_t h = h_index % inputHeight + pH;
    int64_t d_index = h_index / inputHeight;
    int64_t d = d_index % inputDepth + pT;
    int64_t c = d_index / inputDepth;
#else
    int64_t line_seg_len = nInputPlane * inputHeight * inputWidth * inputDepth;
    int64_t line_index_offset = 0;
    int64_t w = pW;
    int64_t h = pH;
    int64_t d = pT;
    int64_t c = 0;;
#endif

    int64_t outputHW = outputHeight * outputWidth;
    int64_t outputDHW = outputDepth * outputHW;
    int64_t kHkW = kH*kW;
    int64_t kTkHkW = kT*kHkW;

    // Precomputed strides for stepping through finput along each output axis.
    int64_t coeff_d_col = outputHW - dT * kHkW * outputDHW;
    int64_t coeff_h_col = outputWidth - dH * kW * outputDHW;
    int64_t coeff_w_col = (1 - dW * outputDHW);

    int64_t count = 0;
    while (count < line_seg_len) {
      // compute the start and end of the output
      int64_t w_col_start = (w < kW) ? 0 : (w - kW) / dW + 1;
      int64_t w_col_tmp = w / dW + 1;
      int64_t w_col_end = w_col_tmp < outputWidth? w_col_tmp : outputWidth;

      int64_t h_col_start = (h < kH) ? 0 : (h - kH) / dH + 1;
      int64_t h_col_tmp = h / dH + 1;
      int64_t h_col_end = h_col_tmp < outputHeight? h_col_tmp : outputHeight;

      int64_t d_col_start = (d < kT) ? 0 : (d - kT) / dT + 1;
      int64_t d_col_tmp = d / dT + 1;
      int64_t d_col_end = d_col_tmp < outputDepth? d_col_tmp : outputDepth;

      scalar_t val = 0;
      int64_t offset = (c * kTkHkW + d * kHkW + h * kW + w) * outputDHW;

      int64_t offset_w_col_start = w_col_start * coeff_w_col;
      int64_t offset_d_col_start = d_col_start * coeff_d_col;
      int64_t offset_h_col_start = h_col_start * coeff_h_col;
      int64_t offset_w_col = offset_w_col_start + offset;
      int64_t offset_d_col;
      int64_t offset_h_col;
      int64_t w_col, d_col, h_col;

      // Accumulate every column entry that this input element contributed to.
      for (w_col = w_col_start; w_col < w_col_end; ++w_col) {
        offset_d_col = offset_d_col_start + offset_w_col;
        for (d_col = d_col_start; d_col < d_col_end; ++d_col) {
          offset_h_col = offset_h_col_start + offset_d_col;
          for (h_col = h_col_start; h_col < h_col_end; ++h_col) {
            val += finput_data[offset_h_col];
            offset_h_col += coeff_h_col;
          }
          offset_d_col += coeff_d_col;
        }
        offset_w_col += coeff_w_col;
      }

      input_data[line_index_offset+count] = val;
      count++;

      // Advance (w, h, d, c) in odometer fashion — cheaper than div/mod.
      if (count < line_seg_len) {
        if (w - pW + 1 == inputWidth) {
          w = pW;
          if (h - pH + 1 == inputHeight) {
            h = pH;
            if (d - pT + 1 == inputDepth) {
              d = pT;
              c++;
            }
            else d++;
          }
          else h++;
        }
        else w++;
      }
    }
#ifdef _OPENMP
  }
#endif
}

/*
   Modified from the version of CUDA implementation, but the loop iterations is
   larger than that one. The larger loop could lower the proportion of openmp
   overhead. And the inner part in loop is simpler. The naive code is below:

   scalar_t *input_data = input->data<scalar_t>();
   scalar_t *finput_data = finput->data<scalar_t>();

   int64_t n = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
   #pragma omp parallel for firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, inputHeight, inputWidth, inputDepth)
   for (int64_t idx = 0; idx < n ; ++idx) {
      int64_t w_out = line_index_offset % outputWidth;
      int64_t remained = line_index_offset / outputWidth;
      int64_t h_out = remained % outputHeight;
      remained /= outputHeight;
      int64_t d_out = remained % outputDepth;
      remained /= outputDepth;
      int k = remained % kW;
      remained /= kW;
      int j = remained % kH;
      remained /= kH;
      int i = remained % kT;
      int64_t nip = remained / kT;

      int64_t d = d_out * dT - pT + i;
      int64_t h = h_out * dH - pH + j;
      int64_t w = w_out * dW - pW + k;

      finput_data[idx] = (h >= 0 && w >= 0 && d >= 0 && h < inputHeight && w < inputWidth && d < inputDepth) ?
        input_data[nip*inputDepth*inputWidth*inputHeight+ d*inputHeight*inputWidth + h*inputWidth + w] : 0;
   }

   However, there are 6 quotient and 6 module operations which are very
   time-consuming. So we choose relatively more complex but more efficient
   pattern.
*/
// vol2col: copies (with implicit zero padding) input patches into the column
// buffer finput so the convolution reduces to a single GEMM.
static void THNN_(unfolded_copy_vol)(
          THTensor *finput,
          THTensor *input,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  scalar_t *input_data = input->data<scalar_t>();
  scalar_t *finput_data = finput->data<scalar_t>();

#ifdef _OPENMP
  // Avoid nested parallelism when the per-frame loop is already parallel.
  int inOmp = omp_in_parallel();
  #pragma omp parallel if (!inOmp) firstprivate(finput_data, input_data, outputWidth, outputHeight, outputDepth, kW, kH, kT, dW, dH, dT, pW, pH, pT, nInputPlane, inputHeight, inputWidth, inputDepth)
  {
    size_t num_threads = omp_get_num_threads();
    size_t tid = omp_get_thread_num();
    int64_t n = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
    int64_t seg_len_tmp = n / num_threads;
    int64_t line_index_offset = tid * seg_len_tmp;
    int64_t line_seg_len = (tid == num_threads - 1)? (n-line_index_offset) : seg_len_tmp;

    // One-time decomposition of the segment start; afterwards the indices are
    // advanced incrementally (see odometer update below).
    int64_t w_out = line_index_offset % outputWidth;
    int64_t remained = line_index_offset / outputWidth;
    int64_t h_out = remained % outputHeight;
    remained /= outputHeight;
    int64_t d_out = remained % outputDepth;
    remained /= outputDepth;
    int k = remained % kW;
    remained /= kW;
    int j = remained % kH;
    remained /= kH;
    int i = remained % kT;
    int64_t nip = remained / kT;
#else
    int64_t line_seg_len = nInputPlane*kT*kH*kW*outputDepth*outputWidth*outputHeight;
    int64_t line_index_offset = 0;
    int64_t w_out = 0;
    int64_t h_out = 0;
    int64_t d_out = 0;
    int i = 0;
    int j = 0;
    int k = 0;
    int64_t nip = 0;
#endif

    int64_t count = 0;
    scalar_t* dst = finput_data + line_index_offset;
    int64_t inputHW = inputHeight*inputWidth;
    int64_t inputDHW = inputHW*inputDepth;

    while (count < line_seg_len) {
      // Map the column-buffer coordinate back to a (possibly padded) input
      // position; out-of-bounds positions read as zero.
      int64_t w = w_out * dW - pW + k;
      int64_t h = h_out * dH - pH + j;
      int64_t d = d_out * dT - pT + i;

      *dst = (h >= 0 && w >= 0 && d >= 0 && h < inputHeight && w < inputWidth && d < inputDepth) ?
        input_data[nip*inputDHW+ d*inputHW + h*inputWidth + w] : 0;

      count++;
      // Odometer increment over (w_out, h_out, d_out, k, j, i, nip).
      if (count < line_seg_len) {
        dst++;
        w_out++;
        if (w_out == outputWidth) {
          w_out = 0;
          h_out++;
          if (h_out == outputHeight) {
            h_out = 0;
            d_out++;
            if (d_out == outputDepth) {
              d_out = 0;
              k++;
              if(k == kW) {
                k = 0;
                j++;
                if(j == kH) {
                  j = 0;
                  i++;
                  if(i == kT) {
                    i = 0;
                    nip++;
                  }
                }
              }
            }
          }
        }
      }
    }
#ifdef _OPENMP
  }
#endif
}

// Forward pass for a single (non-batch) frame: vol2col into finput, then
// output = bias (broadcast) + weight @ finput via one addmm GEMM.
static void THNN_(VolumetricConvolutionMM_updateOutput_frame)(
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          int64_t nInputPlane,
          int64_t inputDepth,
          int64_t inputWidth,
          int64_t inputHeight,
          int64_t nOutputPlane,
          int64_t outputDepth,
          int64_t outputWidth,
          int64_t outputHeight)
{
  int64_t i;
  THTensor *output2d;

  THNN_(unfolded_copy_vol)(
    finput, input,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    nInputPlane,
    inputDepth, inputWidth, inputHeight,
    outputDepth, outputWidth, outputHeight
  );

  // 2D alias of output so the convolution is a plain matrix multiply.
  output2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(output), output->storage_offset(), nOutputPlane, -1,
    outputDepth*outputHeight*outputWidth, -1
  );

  if (bias) {
    // Pre-fill each output plane with its bias value (addmm then adds onto it).
    for (i = 0; i < nOutputPlane; i++)
    {
      THVector_(fill)(
        THStorage_(data)(THTensor_getStoragePtr(output))+output->storage_offset()+output->stride(0)*i,
        THTensor_(get1d)(bias, i),
        outputDepth*outputHeight*outputWidth
      );
    }
  } else {
    THTensor_(zero)(output);
  }

  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  c10::raw::intrusive_ptr::decref(output2d);
}

// Public forward entry point: shape-checks, resizes output/finput, and runs
// the per-frame kernel (parallel over batch frames above the threshold).
void THNN_(VolumetricConvolutionMM_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output,
          THTensor *weight,
          THTensor *bias,
          THTensor *finput,
          THTensor *fgradInput, // unused
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  int dimf = 0;
  int dimt = 1;
  int dimh = 2;
  int dimw = 3;

  int64_t nInputPlane;
  int64_t inputDepth;
  int64_t inputHeight;
  int64_t inputWidth;
  int64_t nOutputPlane;
  int64_t outputDepth;
  int64_t outputHeight;
  int64_t outputWidth;

  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, NULL, weight, bias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 0);
  input = THTensor_(newContiguous)(input);

  if (input->dim() == 5)
  {
    dimf++;
    dimt++;
    dimh++;
    dimw++;
  }

  nInputPlane = input->size(dimf);
  inputDepth = input->size(dimt);
  inputHeight  = input->size(dimh);
  inputWidth   = input->size(dimw);
  nOutputPlane = weight->size(0);
  outputDepth  = (inputDepth + 2*pT - kT) / dT + 1;
  outputHeight = (inputHeight + 2*pH - kH) / dH + 1;
  outputWidth  = (inputWidth + 2*pW - kW) / dW + 1;

  weight = THNN_(newViewWeight)(weight);

  if (input->dim() == 4)
  {
    THTensor_(resize2d)(finput, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize4d)(output, nOutputPlane, outputDepth, outputHeight, outputWidth);

    THNN_(VolumetricConvolutionMM_updateOutput_frame)(
      input, output, weight, bias, finput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH,
      nInputPlane, inputDepth, inputWidth, inputHeight,
      nOutputPlane, outputDepth, outputWidth, outputHeight
    );
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

    THTensor_(resize3d)(finput, T, kT*kW*kH*nInputPlane, outputDepth*outputHeight*outputWidth);
    THTensor_(resize5d)(output, T, nOutputPlane, outputDepth, outputHeight, outputWidth);

#ifdef _OPENMP
    // Parallel over frames; the inner vol2col then stays serial (see
    // omp_in_parallel() guard in unfolded_copy_vol).
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);

      THNN_(VolumetricConvolutionMM_updateOutput_frame)(
        input_t, output_t, weight, bias, finput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH,
        nInputPlane, inputDepth, inputWidth, inputHeight,
        nOutputPlane, outputDepth, outputWidth, outputHeight
      );

      c10::raw::intrusive_ptr::decref(input_t);
      c10::raw::intrusive_ptr::decref(output_t);
      c10::raw::intrusive_ptr::decref(finput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(weight);
}

// Backward-input for one frame: fgradInput = weight^T @ gradOutput (GEMM),
// then col2vol-accumulate fgradInput back into gradInput.
static void THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
          THTensor *gradInput,
          THTensor *gradOutput,
          THTensor *weight,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  // 2D alias of gradOutput for the GEMM.
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    gradOutput->size(0), -1,
    gradOutput->size(1)*gradOutput->size(2)*gradOutput->size(3), -1
  );

  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  c10::raw::intrusive_ptr::decref(gradOutput2d);

  THTensor_(zero)(gradInput);

  THNN_(unfolded_acc_vol)(
    fgradInput, gradInput,
    kT, kW, kH,
    dT, dW, dH,
    pT, pW, pH,
    gradInput->size(0), gradInput->size(1), gradInput->size(3), gradInput->size(2),
    gradOutput->size(1), gradOutput->size(3), gradOutput->size(2)
  );
}

// Public backward-input entry point: shape-checks, resizes, and dispatches
// per frame (OpenMP over frames for large batches).
void THNN_(VolumetricConvolutionMM_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *weight,
          THTensor *finput,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH)
{
  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, weight, NULL,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 0);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  weight = THNN_(newViewWeight)(weight);

  THTensor_(resizeAs)(gradInput, input);
  THTensor_(resizeAs)(fgradInput, finput);

  // depending on the BLAS library, fgradInput (result tensor) might
  // be left uninitialized on zero alpha, which might lead to weird behavior
  // hence, to be safe, zero it
  THTensor_(zero)(fgradInput);

  THTensor *tweight = THTensor_(new)();
  THTensor_(transpose)(tweight, weight, 0, 1);

  if (input->dim() == 4)
  {
    THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
      gradInput, gradOutput, tweight, fgradInput,
      kT, kW, kH,
      dT, dW, dH,
      pT, pW, pH
    );
  }
  else
  {
    int64_t T = input->size(0);
    int64_t t;

#ifdef _OPENMP
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);

      THNN_(VolumetricConvolutionMM_updateGradInput_frame)(
        gradInput_t, gradOutput_t, tweight, fgradInput_t,
        kT, kW, kH,
        dT, dW, dH,
        pT, pW, pH
      );

      c10::raw::intrusive_ptr::decref(gradInput_t);
      c10::raw::intrusive_ptr::decref(gradOutput_t);
      c10::raw::intrusive_ptr::decref(fgradInput_t);
    }
  }

  c10::raw::intrusive_ptr::decref(tweight);
  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
  c10::raw::intrusive_ptr::decref(weight);
}

// Parameter-gradient for one frame:
//   gradWeight += scale * gradOutput2d @ finput^T   (GEMM)
//   gradBias[i] += scale * sum over gradOutput plane i
// NOTE(review): in batch mode this is called from an OpenMP loop while
// accumulating into shared gradWeight/gradBias — verify the caller guards
// against concurrent accumulation.
static void THNN_(VolumetricConvolutionMM_accGradParameters_frame)(
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,  // can be NULL if gradWeight = NULL
          scalar_t scale)
{
  int64_t i;
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(
    THTensor_getStoragePtr(gradOutput), gradOutput->storage_offset(),
    gradOutput->size(0), -1,
    gradOutput->size(1)*gradOutput->size(2)*gradOutput->size(3), -1
  );

  if (gradWeight){
    THTensor *tfinput = THTensor_(new)();
    THTensor_(transpose)(tfinput, finput, 0, 1);
    THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
    c10::raw::intrusive_ptr::decref(tfinput);
  }

  if (gradBias) {
    for (i = 0; i < THTensor_sizeLegacyNoScalars(gradBias, 0); i++)
    {
      int64_t k;
      scalar_t sum = 0;
      scalar_t *data = THStorage_(data)(THTensor_getStoragePtr(gradOutput2d)) + gradOutput2d->storage_offset() + i*gradOutput2d->stride(0);
      for (k = 0; k < gradOutput2d->size(1); k++)
        sum += data[k];
      (THStorage_(data)(THTensor_getStoragePtr(gradBias)) + gradBias->storage_offset())[i] += scale * sum;
    }
  }

  c10::raw::intrusive_ptr::decref(gradOutput2d);
}

// Public parameter-gradient entry point; gradWeight may be NULL (bias-only).
void THNN_(VolumetricConvolutionMM_accGradParameters)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradWeight,
          THTensor *gradBias,
          THTensor *finput,
          THTensor *fgradInput,
          int kT, int kW, int kH,
          int dT, int dW, int dH,
          int pT, int pW, int pH,
          accreal scale_)
{
  scalar_t scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
  THNN_(VolumetricConvolutionMM_shapeCheck)(
        state, input, gradOutput, gradWeight, gradBias,
        kT, kW, kH, dT, dW, dH, pT, pW, pH, 1);
  input = THTensor_(newContiguous)(input);
  gradOutput = THTensor_(newContiguous)(gradOutput);

  if (gradWeight) {
    gradWeight = THNN_(newViewWeight)(gradWeight);
  }

  if (input->dim() == 4)   // non-batch mode
  {
    THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale);
  }
  else  // batch mode
  {
    int64_t T = input->size(0);
    int64_t t;

#ifdef _OPENMP
    #pragma omp parallel for if(T > CONV3D_OMP_THRESHOLD) private(t)
#endif
    for (t = 0; t < T; t++)
    {
      THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
      THTensor *finput_t = NULL;
      if (gradWeight) {
        finput_t = THTensor_(newSelect)(finput, 0, t);
      }

      THNN_(VolumetricConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale);

      c10::raw::intrusive_ptr::decref(gradOutput_t);
      if (gradWeight) {
        c10::raw::intrusive_ptr::decref(finput_t);
      }
    }
  }

  c10::raw::intrusive_ptr::decref(input);
  c10::raw::intrusive_ptr::decref(gradOutput);
  if (gradWeight) {
    c10::raw::intrusive_ptr::decref(gradWeight);
  }
}

#endif
stats.c
//----------------------------------------------------------------------------- // stats.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (Build 5.1.001) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // Author: L. Rossman (EPA) // R. Dickinson (CDM) // // Simulation statistics functions. // // Build 5.1.007: // - Exfiltration losses added to storage node statistics. // // Build 5.1.008: // - Support for updating groundwater statistics added. // - Support for updating maximum reported nodal depths added. // - OpenMP parallelization applied to updating node and link flow statistics. // - Updating of time that conduit is upstrm/dnstrm full was modified. // // Build 5.1.011: // - Surcharging is now evaluated only under dynamic wave flow routing and // storage nodes cannot be classified as surcharged. // // Build 5.1.012: // - Time step statistics now evaluated only in non-steady state periods. // - Check for full conduit flow now accounts for number of barrels. 
// //----------------------------------------------------------------------------- #define _CRT_SECURE_NO_DEPRECATE #include <stdlib.h> #include <math.h> #include <omp.h> //(5.1.008) #include "headers.h" //----------------------------------------------------------------------------- // Shared variables //----------------------------------------------------------------------------- #define MAX_STATS 5 static TSysStats SysStats; static TMaxStats MaxMassBalErrs[MAX_STATS]; static TMaxStats MaxCourantCrit[MAX_STATS]; static TMaxStats MaxFlowTurns[MAX_STATS]; static double SysOutfallFlow; //----------------------------------------------------------------------------- // Exportable variables (shared with statsrpt.c) //----------------------------------------------------------------------------- TSubcatchStats* SubcatchStats; TNodeStats* NodeStats; TLinkStats* LinkStats; TStorageStats* StorageStats; TOutfallStats* OutfallStats; TPumpStats* PumpStats; double MaxOutfallFlow; double MaxRunoffFlow; //----------------------------------------------------------------------------- // Imported variables //----------------------------------------------------------------------------- extern double* NodeInflow; // defined in massbal.c extern double* NodeOutflow; // defined in massbal.c //----------------------------------------------------------------------------- // External functions (declared in funcs.h) //----------------------------------------------------------------------------- // stats_open (called from swmm_start in swmm5.c) // stats_close (called from swmm_end in swmm5.c) // stats_report (called from swmm_end in swmm5.c) // stats_updateSubcatchStats (called from subcatch_getRunoff) // stats_updateGwaterStats (called from gwater_getGroundwater) //(5.1.008) // stats_updateFlowStats (called from routing_execute) // stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c) // stats_updateMaxNodeDepth (called from output_saveNodeResults) //(5.1.008) 
//-----------------------------------------------------------------------------
//  Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);

//=============================================================================

int stats_open()
//
//  Input:   none
//  Output:  returns an error code
//  Purpose: opens the simulation statistics system.
//
{
    int j, k;

    // --- set all pointers to NULL
    NodeStats = NULL;
    LinkStats = NULL;
    StorageStats = NULL;
    OutfallStats = NULL;
    PumpStats = NULL;

    // --- allocate memory for & initialize subcatchment statistics
    SubcatchStats = NULL;
    if ( Nobjects[SUBCATCH] > 0 )
    {
        SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
                                               sizeof(TSubcatchStats));
        if ( !SubcatchStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            SubcatchStats[j].precip  = 0.0;
            SubcatchStats[j].runon   = 0.0;
            SubcatchStats[j].evap    = 0.0;
            SubcatchStats[j].infil   = 0.0;
            SubcatchStats[j].runoff  = 0.0;
            SubcatchStats[j].maxFlow = 0.0;
        }

////  Added to release 5.1.008.  ////                                          //(5.1.008)
////
        // Zero the groundwater statistics for subcatchments that have a
        // groundwater object.
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            if ( Subcatch[j].groundwater == NULL ) continue;
            Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
            Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
            Subcatch[j].groundwater->stats.infil = 0.0;
            Subcatch[j].groundwater->stats.latFlow = 0.0;
            Subcatch[j].groundwater->stats.deepFlow = 0.0;
            Subcatch[j].groundwater->stats.evap = 0.0;
            Subcatch[j].groundwater->stats.maxFlow = 0.0;
        }
////
    }

    // --- allocate memory for node & link stats
    // NOTE(review): both arrays are allocated only when links exist
    // (Nobjects[LINK] > 0); node stats stay NULL in link-free models — verify
    // this is intentional for runoff-only runs.
    if ( Nobjects[LINK] > 0 )
    {
        NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
        LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
        if ( !NodeStats || !LinkStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
    }

    // --- initialize node stats
    if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
    {
        NodeStats[j].avgDepth = 0.0;
        NodeStats[j].maxDepth = 0.0;
        NodeStats[j].maxDepthDate = StartDateTime;
        NodeStats[j].maxRptDepth = 0.0;                                        //(5.1.008)
        NodeStats[j].volFlooded = 0.0;
        NodeStats[j].timeFlooded = 0.0;
        NodeStats[j].timeSurcharged = 0.0;
        NodeStats[j].timeCourantCritical = 0.0;
        NodeStats[j].totLatFlow = 0.0;
        NodeStats[j].maxLatFlow = 0.0;
        NodeStats[j].maxInflow = 0.0;
        NodeStats[j].maxOverflow = 0.0;
        NodeStats[j].maxPondedVol = 0.0;
        NodeStats[j].maxInflowDate = StartDateTime;
        NodeStats[j].maxOverflowDate = StartDateTime;
    }

    // --- initialize link stats
    if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
    {
        LinkStats[j].maxFlow = 0.0;
        LinkStats[j].maxVeloc = 0.0;
        LinkStats[j].maxDepth = 0.0;
        LinkStats[j].timeSurcharged = 0.0;
        LinkStats[j].timeFullUpstream = 0.0;
        LinkStats[j].timeFullDnstream = 0.0;
        LinkStats[j].timeFullFlow = 0.0;
        LinkStats[j].timeCapacityLimited = 0.0;
        LinkStats[j].timeCourantCritical = 0.0;
        for (k=0; k<MAX_FLOW_CLASSES; k++)
            LinkStats[j].timeInFlowClass[k] = 0.0;
        LinkStats[j].flowTurns = 0;
        LinkStats[j].flowTurnSign = 0;
    }

    // --- allocate memory for & initialize storage unit statistics
    if ( Nnodes[STORAGE] > 0 )
    {
        StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
                           sizeof(TStorageStats));
        if ( !StorageStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( k = 0; k < Nobjects[NODE]; k++ )
        {
            if ( Node[k].type != STORAGE ) continue;
            j = Node[k].subIndex;
            StorageStats[j].initVol = Node[k].newVolume;
            StorageStats[j].avgVol = 0.0;
            StorageStats[j].maxVol = 0.0;
            StorageStats[j].maxFlow = 0.0;
            StorageStats[j].evapLosses = 0.0;
            StorageStats[j].exfilLosses = 0.0;                                 //(5.1.007)
            StorageStats[j].maxVolDate = StartDateTime;
        }
    }

    // --- allocate memory for & initialize outfall statistics
    if ( Nnodes[OUTFALL] > 0 )
    {
        OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
                           sizeof(TOutfallStats));
        if ( !OutfallStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
        {
            OutfallStats[j].avgFlow = 0.0;
            OutfallStats[j].maxFlow = 0.0;
            OutfallStats[j].totalPeriods = 0;
            // Per-pollutant loads only when the project defines pollutants.
            if ( Nobjects[POLLUT] > 0 )
            {
                OutfallStats[j].totalLoad =
                    (double *) calloc(Nobjects[POLLUT], sizeof(double));
                if ( !OutfallStats[j].totalLoad )
                {
                    report_writeErrorMsg(ERR_MEMORY, "");
                    return ErrorCode;
                }
                for (k=0; k<Nobjects[POLLUT]; k++)
                    OutfallStats[j].totalLoad[k] = 0.0;
            }
            else OutfallStats[j].totalLoad = NULL;
        }
    }

    // --- allocate memory & initialize pumping statistics
    if ( Nlinks[PUMP] > 0 )
    {
        PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
        if ( !PumpStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nlinks[PUMP]; j++ )
        {
            PumpStats[j].utilized = 0.0;
            PumpStats[j].minFlow  = 0.0;
            PumpStats[j].avgFlow  = 0.0;
            PumpStats[j].maxFlow  = 0.0;
            PumpStats[j].volume   = 0.0;
            PumpStats[j].energy   = 0.0;
            PumpStats[j].startUps = 0;
            PumpStats[j].offCurveLow = 0.0;
            PumpStats[j].offCurveHigh = 0.0;
        }
    }

    // --- initialize system stats
    MaxRunoffFlow = 0.0;
    MaxOutfallFlow = 0.0;
    SysStats.maxTimeStep = 0.0;
    SysStats.minTimeStep = RouteStep;
    SysStats.avgTimeStep = 0.0;
    SysStats.avgStepCount = 0.0;
    SysStats.steadyStateCount = 0.0;
    return 0;
}

//=============================================================================

void stats_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes the simulation statistics system.
//
{
    int j;

    FREE(SubcatchStats);
    FREE(NodeStats);
    FREE(LinkStats);
    FREE(StorageStats);
    if ( OutfallStats )
    {
        // Each outfall owns its per-pollutant load array; free those first.
        for ( j=0; j<Nnodes[OUTFALL]; j++ )
            FREE(OutfallStats[j].totalLoad);
        FREE(OutfallStats);
    }
    FREE(PumpStats);
}

//=============================================================================

void stats_report()
//
//  Input:   none
//  Output:  none
//  Purpose: reports simulation statistics.
//
{
    // --- report flow routing accuracy statistics
    if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING )
    {
        stats_findMaxStats();
        report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS);
        report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS);
        report_writeSysStats(&SysStats);
    }

    // --- report summary statistics
    statsrpt_writeReport();
}

//=============================================================================

void stats_updateSubcatchStats(int j, double rainVol, double runonVol,
                               double evapVol, double infilVol,
                               double runoffVol, double runoff)
//
//  Input:   j = subcatchment index
//           rainVol   = rainfall + snowfall volume (ft3)
//           runonVol  = runon volume from other subcatchments (ft3)
//           evapVol   = evaporation volume (ft3)
//           infilVol  = infiltration volume (ft3)
//           runoffVol = runoff volume (ft3)
//           runoff    = runoff rate (cfs)
//  Output:  none
//  Purpose: updates totals of runoff components for a specific subcatchment.
//
{
    SubcatchStats[j].precip += rainVol;
    SubcatchStats[j].runon  += runonVol;
    SubcatchStats[j].evap   += evapVol;
    SubcatchStats[j].infil  += infilVol;
    SubcatchStats[j].runoff += runoffVol;
    SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff);
}

//=============================================================================

////  New function added to release 5.1.008.  ////                             //(5.1.008)

void stats_updateGwaterStats(int j, double infil, double evap, double latFlow,
                             double deepFlow, double theta, double waterTable,
                             double tStep)
//
//  Input:   j = subcatchment index; remaining arguments are groundwater
//           fluxes/state for the current time step
//  Output:  none
//  Purpose: accumulates time-weighted groundwater statistics for a
//           subcatchment (avg* fields are normalized elsewhere at reporting).
//
{
    Subcatch[j].groundwater->stats.infil += infil * tStep;
    Subcatch[j].groundwater->stats.evap += evap * tStep;
    Subcatch[j].groundwater->stats.latFlow += latFlow * tStep;
    Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep;
    Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep;
    Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep;
    Subcatch[j].groundwater->stats.finalUpperMoist = theta;
    Subcatch[j].groundwater->stats.finalWaterTable = waterTable;
    // Track the lateral flow with the largest magnitude (sign preserved).
    if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) )
    {
        Subcatch[j].groundwater->stats.maxFlow = latFlow;
    }
}

//=============================================================================

void stats_updateMaxRunoff()
//
//  Input:   none
//  Output:  updates global variable MaxRunoffFlow
//  Purpose: updates value of maximum system runoff rate.
//
{
    int j;
    double sysRunoff = 0.0;

    for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff;
    MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff);
}

//=============================================================================

////  New function added for release 5.1.008.  ////                            //(5.1.008)

void stats_updateMaxNodeDepth(int j, double depth)
//
//  Input:   j = node index
//           depth = water depth at node at current reporting time (ft)
//  Output:  none
//  Purpose: updates a node's maximum depth recorded at reporting times.
//
{
    if ( NodeStats != NULL )
        NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth);
}

//=============================================================================

void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount,
                           int steadyState)
//
//  Input:   tStep = routing time step (sec)
//           aDate = current date/time
//           stepCount = # steps required to solve routing at current time period
//           steadyState = TRUE if steady flow conditions exist
//  Output:  none
//  Purpose: updates various flow routing statistics at current time period.
//
{
    int   j;

    // --- update stats only after reporting period begins
    if ( aDate < ReportStart ) return;
    SysOutfallFlow = 0.0;

    // --- update node & link stats
    // NOTE(review): stats_updateNodeStats accumulates into the shared
    // SysOutfallFlow from inside this parallel region — confirm the update
    // is protected (or that NumThreads is 1) to avoid a data race.
#pragma omp parallel num_threads(NumThreads)                                   //(5.1.008)
{
    #pragma omp for                                                            //(5.1.008)
    for ( j=0; j<Nobjects[NODE]; j++ )
        stats_updateNodeStats(j, tStep, aDate);
    #pragma omp for                                                            //(5.1.008)
    for ( j=0; j<Nobjects[LINK]; j++ )
        stats_updateLinkStats(j, tStep, aDate);
}

////  Following code segment modified for release 5.1.012.  ////               //(5.1.012)

    // --- update count of times in steady state
    SysStats.steadyStateCount += steadyState;

    // --- update time step stats if not in steady state
    if ( steadyState == FALSE )
    {
        // --- skip initial time step for min. value)
        if ( OldRoutingTime > 0 )
        {
            SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep);
        }
        SysStats.avgTimeStep += tStep;
        SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep);

        // --- update iteration step count stats
        SysStats.avgStepCount += stepCount;
    }
////

    // --- update max. system outfall flow
    MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow);
}

//=============================================================================

void stats_updateCriticalTimeCount(int node, int link)
//
//  Input:   node = node index
//           link = link index
//  Output:  none
//  Purpose: updates count of times a node or link was time step-critical.
//
{
    if      ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0;
    else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}

//=============================================================================

////  Function modified for release 5.1.008.  ////                             //(5.1.008)

void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = node index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a node.
//
{
    int    k, p;
    double newVolume = Node[j].newVolume;
    double newDepth = Node[j].newDepth;
    int    canPond = (AllowPonding && Node[j].pondedArea > 0.0);

    // --- update depth statistics
    NodeStats[j].avgDepth += newDepth;
    if ( newDepth > NodeStats[j].maxDepth )
    {
        NodeStats[j].maxDepth = newDepth;
        NodeStats[j].maxDepthDate = aDate;
    }

    // --- update flooding, ponding, and surcharge statistics
    if ( Node[j].type != OUTFALL )
    {
        if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
        {
            NodeStats[j].timeFlooded += tStep;
            NodeStats[j].volFlooded += Node[j].overflow * tStep;
            if ( canPond ) NodeStats[j].maxPondedVol =
                MAX(NodeStats[j].maxPondedVol,
                    (newVolume - Node[j].fullVolume));
        }

        // --- for dynamic wave routing, classify a non-storage node as        //(5.1.011)
        //     surcharged if its water level exceeds its crown elev.           //(5.1.011)
        if ( RouteModel == DW && Node[j].type != STORAGE &&                    //(5.1.011)
             newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev )
        {
            NodeStats[j].timeSurcharged += tStep;
        }
    }

    // --- update storage statistics
    if ( Node[j].type == STORAGE )
    {
        k = Node[j].subIndex;
        StorageStats[k].avgVol += newVolume;
        StorageStats[k].evapLosses +=
            Storage[Node[j].subIndex].evapLoss;
        StorageStats[k].exfilLosses +=
            Storage[Node[j].subIndex].exfilLoss;

        // Max. volume is capped at the full volume (flooded excess is
        // counted under flooding stats above).
        newVolume = MIN(newVolume, Node[j].fullVolume);
        if ( newVolume > StorageStats[k].maxVol )
        {
            StorageStats[k].maxVol = newVolume;
            StorageStats[k].maxVolDate = aDate;
        }
        StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
    }

    // --- update outfall statistics
    if ( Node[j].type == OUTFALL )
    {
        k = Node[j].subIndex;
        // Only count periods with meaningful inflow toward the average.
        if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
        {
            OutfallStats[k].avgFlow += Node[j].inflow;
            OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
            OutfallStats[k].totalPeriods++;
        }
        for (p=0; p<Nobjects[POLLUT]; p++)
        {
            OutfallStats[k].totalLoad[p] += Node[j].inflow *
                Node[j].newQual[p] * tStep;
        }
        SysOutfallFlow += Node[j].inflow;
    }

    // --- update inflow statistics
    // Trapezoidal (avg of old & new) lateral inflow volume over the step.
    NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
                                 0.5 * tStep );
    if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
        NodeStats[j].maxLatFlow = Node[j].newLatFlow;
    if ( Node[j].inflow > NodeStats[j].maxInflow )
    {
        NodeStats[j].maxInflow = Node[j].inflow;
        NodeStats[j].maxInflowDate = aDate;
    }

    // --- update overflow statistics
    if ( Node[j].overflow > NodeStats[j].maxOverflow )
    {
        NodeStats[j].maxOverflow = Node[j].overflow;
        NodeStats[j].maxOverflowDate = aDate;
    }
}

//=============================================================================

void stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = link index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a link.
//
{
    int    k;
    double q, v;
    double dq;

    // --- update max.
flow dq = Link[j].newFlow - Link[j].oldFlow; q = fabs(Link[j].newFlow); if ( q > LinkStats[j].maxFlow ) { LinkStats[j].maxFlow = q; LinkStats[j].maxFlowDate = aDate; } // --- update max. velocity v = link_getVelocity(j, q, Link[j].newDepth); if ( v > LinkStats[j].maxVeloc ) { LinkStats[j].maxVeloc = v; //LinkStats[j].maxVelocDate = aDate; //(5.1.008) } // --- update max. depth if ( Link[j].newDepth > LinkStats[j].maxDepth ) { LinkStats[j].maxDepth = Link[j].newDepth; } if ( Link[j].type == PUMP ) { if ( q >= Link[j].qFull ) LinkStats[j].timeFullFlow += tStep; if ( q > MIN_RUNOFF_FLOW ) { k = Link[j].subIndex; PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q); PumpStats[k].maxFlow = LinkStats[j].maxFlow; PumpStats[k].avgFlow += q; PumpStats[k].volume += q*tStep; PumpStats[k].utilized += tStep; PumpStats[k].energy += link_getPower(j)*tStep/3600.0; if ( Link[j].flowClass == DN_DRY ) PumpStats[k].offCurveLow += tStep; if ( Link[j].flowClass == UP_DRY ) PumpStats[k].offCurveHigh += tStep; if ( Link[j].oldFlow < MIN_RUNOFF_FLOW ) PumpStats[k].startUps++; PumpStats[k].totalPeriods++; LinkStats[j].timeSurcharged += tStep; LinkStats[j].timeFullUpstream += tStep; LinkStats[j].timeFullDnstream += tStep; } } else if ( Link[j].type == CONDUIT ) { // --- update time under normal flow & inlet control if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep; if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep; // --- update flow classification distribution k = Link[j].flowClass; if ( k >= 0 && k < MAX_FLOW_CLASSES ) { ++LinkStats[j].timeInFlowClass[k]; } // --- update time conduit is full k = Link[j].subIndex; if ( q >= Link[j].qFull * (double)Conduit[k].barrels ) //(5.1.012) LinkStats[j].timeFullFlow += tStep; if ( Conduit[k].capacityLimited ) LinkStats[j].timeCapacityLimited += tStep; //// Following section modified for release 5.1.008. 
//// //(5.1.008) //// switch (Conduit[k].fullState) { case ALL_FULL: LinkStats[j].timeSurcharged += tStep; LinkStats[j].timeFullUpstream += tStep; LinkStats[j].timeFullDnstream += tStep; break; case UP_FULL: LinkStats[j].timeFullUpstream += tStep; break; case DN_FULL: LinkStats[j].timeFullDnstream += tStep; } //// } // --- update flow turn count k = LinkStats[j].flowTurnSign; LinkStats[j].flowTurnSign = SGN(dq); if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 ) LinkStats[j].flowTurns++; } //============================================================================= void stats_findMaxStats() // // Input: none // Output: none // Purpose: finds nodes & links with highest mass balance errors // & highest times Courant time-step critical. // { int j; double x; // --- initialize max. stats arrays for (j=0; j<MAX_STATS; j++) { MaxMassBalErrs[j].objType = NODE; MaxMassBalErrs[j].index = -1; MaxMassBalErrs[j].value = -1.0; MaxCourantCrit[j].index = -1; MaxCourantCrit[j].value = -1.0; MaxFlowTurns[j].index = -1; MaxFlowTurns[j].value = -1.0; } // --- find links with most flow turns if ( StepCount > 2 ) { for (j=0; j<Nobjects[LINK]; j++) { x = 100.0 * LinkStats[j].flowTurns / (2./3.*(StepCount-2)); stats_updateMaxStats(MaxFlowTurns, LINK, j, x); } } // --- find nodes with largest mass balance errors for (j=0; j<Nobjects[NODE]; j++) { // --- skip terminal nodes and nodes with negligible inflow if ( Node[j].degree <= 0 ) continue; if ( NodeInflow[j] <= 0.1 ) continue; // --- evaluate mass balance error // (Note: NodeInflow & NodeOutflow include any initial and final // stored volumes, respectively). 
if ( NodeInflow[j] > 0.0 ) x = 1.0 - NodeOutflow[j] / NodeInflow[j]; else if ( NodeOutflow[j] > 0.0 ) x = -1.0; else x = 0.0; stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x); } // --- stop if not using a variable time step if ( RouteModel != DW || CourantFactor == 0.0 ) return; // --- find nodes most frequently Courant critical if ( StepCount == 0 ) return; //(5.1.008) for (j=0; j<Nobjects[NODE]; j++) { x = NodeStats[j].timeCourantCritical / StepCount; stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x); } // --- find links most frequently Courant critical for (j=0; j<Nobjects[LINK]; j++) { x = LinkStats[j].timeCourantCritical / StepCount; stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x); } } //============================================================================= void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x) // // Input: maxStats[] = array of critical statistics values // i = object category (NODE or LINK) // j = object index // x = value of statistic for the object // Output: none // Purpose: updates the collection of most critical statistics // { int k; TMaxStats maxStats1, maxStats2; maxStats1.objType = i; maxStats1.index = j; maxStats1.value = x; for (k=0; k<MAX_STATS; k++) { if ( fabs(maxStats1.value) > fabs(maxStats[k].value) ) { maxStats2 = maxStats[k]; maxStats[k] = maxStats1; maxStats1 = maxStats2; } } } //=============================================================================
/* ========================= matmul-decompose.c ========================= */
/*
 * Square matrix multiplication
 * A[N][N] * B[N][N] = C[N][N]
 *
 * Demonstrates 1-D row, 1-D column, and 2-D row/column block decomposition
 * of matrix multiply across OpenMP threads.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/timeb.h>
#include <omp.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/* Fill an N x N matrix with pseudo-random values in [0, 1). */
void init(int N, REAL A[][N]) {
    int i, j;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[i][j] = (REAL) drand48();
        }
    }
}

/*
 * Maximum element-wise relative error |A-B|/|A| between two N x N matrices.
 * A is the reference; entries of A are assumed non-zero (true in practice
 * for sums of products of drand48() values).
 */
double maxerror(int N, REAL A[][N], REAL B[][N]) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i][j] - B[i][j]) / A[i][j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL A[][N], REAL B[][N], REAL C[][N]);
void matmul_base_sub(int i_start, int j_start, int Mt, int Nt, int N,
                     REAL A[][N], REAL B[][N], REAL C[][N]);
void matmul_row1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks);
void matmul_column1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks);
void matmul_rowcol2D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N],
                          int num_tasks_row, int num_tasks_col);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 4; /* 4 is default number of tasks */
    double elapsed_base, elapsed_row1D_dist, elapsed_column1D_dist, elapsed_rowcol2D_dist; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);
    if (N <= 0 || num_tasks <= 0) {
        fprintf(stderr, "N and #tasks must be positive\n");
        exit(1);
    }

    /* Heap-allocate the matrices: six N x N stack VLAs overflow the stack
     * for even moderate N.  Pointer-to-VLA keeps the A[i][j] indexing. */
    REAL (*A)[N]              = malloc(sizeof(REAL[N][N]));
    REAL (*B)[N]              = malloc(sizeof(REAL[N][N]));
    REAL (*C_base)[N]         = malloc(sizeof(REAL[N][N]));
    REAL (*C_row1D_dist)[N]   = malloc(sizeof(REAL[N][N]));
    REAL (*C_column1D_dist)[N] = malloc(sizeof(REAL[N][N]));
    REAL (*C_rowcol2D_dist)[N] = malloc(sizeof(REAL[N][N]));
    if (!A || !B || !C_base || !C_row1D_dist || !C_column1D_dist || !C_rowcol2D_dist) {
        fprintf(stderr, "out of memory\n");
        exit(1);
    }

    srand48(1 << 12);
    init(N, A);
    init(N, B);

    /* time each decomposition against the sequential baseline */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_row1D_dist = read_timer();
    matmul_row1D_dist(N, A, B, C_row1D_dist, num_tasks);
    elapsed_row1D_dist = (read_timer() - elapsed_row1D_dist);

    elapsed_column1D_dist = read_timer();
    matmul_column1D_dist(N, A, B, C_column1D_dist, num_tasks);
    elapsed_column1D_dist = (read_timer() - elapsed_column1D_dist);

    elapsed_rowcol2D_dist = read_timer();
    matmul_rowcol2D_dist(N, A, B, C_rowcol2D_dist, num_tasks, num_tasks);
    elapsed_rowcol2D_dist = (read_timer() - elapsed_rowcol2D_dist);

    printf("======================================================================================================\n");
    printf("\tMatrix Multiplication: A[N][N] * B[N][N] = C[N][N], N=%d\n", N);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, C_base, C_base));
    printf("matmul_row1D_dist:\t%4f\t%4f \t\t%g\n", elapsed_row1D_dist * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_row1D_dist)), maxerror(N, C_base, C_row1D_dist));
    printf("matmul_column1D_dist:\t%4f\t%4f \t\t%g\n", elapsed_column1D_dist * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_column1D_dist)), maxerror(N, C_base, C_column1D_dist));
    printf("matmul_rowcol2D_dist:\t%4f\t%4f \t\t%g\n", elapsed_rowcol2D_dist * 1.0e3,
           ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_rowcol2D_dist)), maxerror(N, C_base, C_rowcol2D_dist));

    free(A); free(B); free(C_base);
    free(C_row1D_dist); free(C_column1D_dist); free(C_rowcol2D_dist);
    return 0;
}

/* Sequential reference: C = A * B with the classic triple loop. */
void matmul_base(int N, REAL A[][N], REAL B[][N], REAL C[][N]) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            C[i][j] = 0;
            for (k = 0; k < N; k++)
                C[i][j] += A[i][k] * B[k][j];
        }
    }
}

/* compute submatrix multiplication, A[start:length] notation
 * A[i_start:Mt][N] x B[N][j_start:Nt] = C[i_start:Mt][j_start:Nt]
 */
void matmul_base_sub(int i_start, int j_start, int Mt, int Nt, int N,
                     REAL A[][N], REAL B[][N], REAL C[][N]) {
    int i, j, k;
    for (i = i_start; i < Mt + i_start; i++) {
        for (j = j_start; j < Nt + j_start; j++) {
            C[i][j] = 0;
            for (k = 0; k < N; k++)
                C[i][j] += A[i][k] * B[k][j];
        }
    }
}

/* this is a sequential version showing the row-block decomposition;
 * the last task absorbs the remainder rows when N % num_tasks != 0 */
void matmul_row1D_dist_seq(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) {
    int tid;
    for (tid = 0; tid < num_tasks; tid++) {
        int Mt = N / num_tasks;
        int Nt = N;
        int i_start = tid * Mt;
        int j_start = 0;
        if (tid == num_tasks - 1) Mt += N % num_tasks; /* remainder rows */
        matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C);
    }
}

/* 1-D row decomposition: thread tid computes rows [tid*Mt, tid*Mt+Mt).
 * NOTE(review): like the original, this assumes the runtime grants exactly
 * num_tasks threads; fewer threads would leave tiles uncomputed. */
void matmul_row1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) {
#pragma omp parallel num_threads(num_tasks)
    {
        int tid = omp_get_thread_num();
        int Mt = N / num_tasks;
        int Nt = N;
        int i_start = tid * Mt;
        int j_start = 0;
        /* last thread also covers the N % num_tasks leftover rows */
        if (tid == num_tasks - 1) Mt += N % num_tasks;
        matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C);
    }
}

/* 1-D column decomposition: thread tid computes columns [tid*Nt, tid*Nt+Nt). */
void matmul_column1D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N], int num_tasks) {
#pragma omp parallel num_threads(num_tasks)
    {
        int tid = omp_get_thread_num();
        int Mt = N;
        int Nt = N / num_tasks;
        int i_start = 0;
        int j_start = tid * Nt;
        /* last thread also covers the N % num_tasks leftover columns */
        if (tid == num_tasks - 1) Nt += N % num_tasks;
        matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C);
    }
}

/* 2-D block decomposition over a num_tasks_row x num_tasks_col grid.
 *
 * BUG FIX: the original spawned num_tasks_row + num_tasks_col threads and
 * set i_start = j_start = tid * tile, which computes only the diagonal
 * tiles of C and leaves the rest uninitialized.  A 2-D decomposition needs
 * one thread per (row, col) tile: tid is unflattened into grid coordinates
 * with tid / num_tasks_col and tid % num_tasks_col. */
void matmul_rowcol2D_dist(int N, REAL A[][N], REAL B[][N], REAL C[][N],
                          int num_tasks_row, int num_tasks_col) {
#pragma omp parallel num_threads(num_tasks_row * num_tasks_col)
    {
        int tid = omp_get_thread_num();
        int row_id = tid / num_tasks_col;   /* tile row in the task grid */
        int col_id = tid % num_tasks_col;   /* tile column in the task grid */
        int Mt = N / num_tasks_row;
        int Nt = N / num_tasks_col;
        int i_start = row_id * Mt;
        int j_start = col_id * Nt;
        /* edge tiles absorb the remainder rows/columns */
        if (row_id == num_tasks_row - 1) Mt += N % num_tasks_row;
        if (col_id == num_tasks_col - 1) Nt += N % num_tasks_col;
        matmul_base_sub(i_start, j_start, Mt, Nt, N, A, B, C);
    }
}
/* ============ ast-dump-openmp-target-teams-distribute-simd.c ============ */
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target teams distribute simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target teams distribute simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target teams distribute simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:4:1, col:41> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // 
CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | 
| | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> 
col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:10:1, col:41> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | 
`-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:17:1, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, 
line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | | | 
|-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1> // CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:24:1, col:53> // CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, 
line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | | | 
|-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1> // CHECK-NEXT: `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:31:1, col:53> // CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52> // CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int' // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2 // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // 
CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // 
CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> 
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> 
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 
'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 
'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: 
| | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9> // CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> // 
CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21> // CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-<<<NULL>>> // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9> // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict' // CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
weightedInnerProduct.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ extern "C" void weightedInnerProduct2(const dlong & N, const dfloat * __restrict__ cpu_w, const dfloat * __restrict__ cpu_a, const dfloat * __restrict__ cpu_b, dfloat * __restrict__ cpu_wab ){ dfloat wab = 0; #pragma omp parallel for reduction(+: wab) for(dlong id=0;id<N;++id) wab += cpu_a[id]*cpu_b[id]*cpu_w[id]; cpu_wab[0] = wab; } extern "C" void weightedMultipleInnerProduct2(const dlong & N, const dlong & offset, dfloat * __restrict__ cpu_w, dfloat * __restrict__ cpu_a, dfloat * __restrict__ cpu_b, dfloat * __restrict__ cpu_wab ){ dfloat wab = 0; for(dlong fld=0;fld<p_Nfields;++fld){ #pragma omp parallel for reduction(+: wab) for(dlong id=0;id<N;++id) wab += cpu_a[id+fld*offset]*cpu_b[id+fld*offset]*cpu_w[id+fld*offset]; } cpu_wab[0] = wab; }
3d25pt_var.lbpar.c
#include <omp.h>
#include <math.h>
/* CLooG/PLUTO helper macros: integer ceiling/floor division via doubles,
 * and lower-case min/max used by the generated tiled loop bounds. */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

/* Subtract the `struct timeval' values X and Y, storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * NOTE: mutates *y while normalizing the microsecond carry — callers must
 * not rely on y afterwards. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }

  /* Compute the time remaining to wait.
   * tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Benchmark driver: allocates the two time-step copies of the grid and the
 * 13 coefficient arrays, runs the tiled stencil TESTS times, and reports
 * per-run and minimum wall-clock time.
 * Usage: prog Nx Ny Nz Nt (each spatial extent is padded by 8 ghost cells). */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* NOTE(review): Nx/Ny/Nz (argc<=3) and Nt (argc<=4) are read
   * uninitialized when too few arguments are given — UB; the harness is
   * assumed to always pass all four. TODO confirm / add a usage check. */
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays: A[2][Nz][Ny][Nx] double-buffers the time steps,
   * coef[13][Nz][Ny][Nx] holds the axis-symmetric coefficients. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations
   * (the values 4x4x8x256 match the constants baked into the generated
   * loop nest below; changing them here alone has no effect). */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 4;
  tile_size[1] = 4;
  tile_size[2] = 8;
  tile_size[3] = 256;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;   /* NOTE(review): assigned but never inspected */
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  /* initialize variables (index 0 planes are left uninitialized —
   * presumably ghost cells never read; TODO confirm) */
  // srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;  /* NOTE(review): queried but never used below */
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2

    /* (A glibc stdc-predef.h license/comment block was inlined here by the
     * source-to-source tool; condensed — it contained no code.) */

    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;   /* lb/ub/lb2/ub2 unused by this nest */
    register int lbv, ubv;

/* Start of CLooG code */
/* Diamond/time-tiled loop nest generated by PLUTO (tiles 4x4x8x256).
 * t1 walks time-tile wavefronts; t2..t4 are space tiles (t2 parallelized
 * across threads); t5 is the time step within a tile; t6..t8 index z/y/x.
 * The physical point is (-4*t5+t6, -4*t5+t7, -4*t5+t8); A double-buffers
 * on t5%2. Do not edit by hand — regenerate instead. */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
  for (t1=-1;t1<=2*Nt-2;t1++) {
    lbp=ceild(t1+2,2);
    ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(ceild(t1,4),ceild(4*t2-Nz+5,8));t3<=min(min(floord(4*Nt+Ny-9,8),floord(2*t1+Ny-3,8)),floord(4*t2+Ny-9,8));t3++) {
        for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(8*t3+Nx-5,256));t4++) {
          for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) {
            for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) {
              for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                lbv=max(256*t4,4*t5+4);
                ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                /* 25-point update: center (coef[0]) plus symmetric pairs at
                 * distances 1..4 along z (coef[1,4,7,10]), y (coef[2,5,8,11])
                 * and x (coef[3,6,9,12]). */
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  /* PRINT_RESULTS comes from print_utils.h; presumably reports min_tdiff
   * for the radius-4 ("4") variable-coefficient kernel — TODO confirm. */
  PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays.
   * NOTE(review): the top-level pointers A, coef and tile_size are never
   * freed (leak at exit only — harmless for a benchmark). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);

  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }

  return 0;
}
scheme.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // #if !defined(KRATOS_SCHEME ) #define KRATOS_SCHEME /* System includes */ /* External includes */ /* Project includes */ #include "includes/model_part.h" #include "utilities/openmp_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class Scheme * @ingroup KratosCore * @brief This class provides the implementation of the basic tasks that are needed by the solution strategy. * @details It is intended to be the place for tailoring the solution strategies to problem specific tasks. * @tparam TSparseSpace The sparse space considered * @tparam TDenseSpace The dense space considered * @author Riccardo Rossi */ template<class TSparseSpace, class TDenseSpace //= DenseSpace<double> > class Scheme { public: ///@name Type Definitions ///@{ /// Pointer definition of Scheme KRATOS_CLASS_POINTER_DEFINITION(Scheme); /// Data type definition typedef typename TSparseSpace::DataType TDataType; /// Matrix type definition typedef typename TSparseSpace::MatrixType TSystemMatrixType; /// Vector type definition typedef typename TSparseSpace::VectorType TSystemVectorType; /// Local system matrix type definition typedef typename TDenseSpace::MatrixType LocalSystemMatrixType; /// Local system vector type definition typedef typename TDenseSpace::VectorType LocalSystemVectorType; /// DoF type definition typedef Dof<double> TDofType; /// DoF array type definition typedef ModelPart::DofsArrayType DofsArrayType; /// DoF iterator type definition typedef typename PointerVectorSet<TDofType, IndexedObject>::iterator DofIterator; /// DoF constant iterator type definition typedef typename PointerVectorSet<TDofType, 
IndexedObject>::const_iterator DofConstantIterator; /// Elements containers definition typedef ModelPart::ElementsContainerType ElementsArrayType; /// Conditions containers definition typedef ModelPart::ConditionsContainerType ConditionsArrayType; /** * @class LocalSystemComponents * @brief This struct is used in the component wise calculation only is defined here and is used to declare a member variable in the component wise schemes private pointers can only be accessed by means of set and get functions * @details This allows to set and not copy the Element_Variables and Condition_Variables which will be asked and set by another strategy object */ struct LocalSystemComponents { private: ///@name Member Variables ///@{ // Elements std::vector<LocalSystemMatrixType> *mpLHS_Element_Components; const std::vector< Variable< LocalSystemMatrixType > > *mpLHS_Element_Variables; std::vector<LocalSystemVectorType> *mpRHS_Element_Components; const std::vector< Variable< LocalSystemVectorType > > *mpRHS_Element_Variables; // Conditions std::vector<LocalSystemMatrixType> *mpLHS_Condition_Components; const std::vector< Variable< LocalSystemMatrixType > > *mpLHS_Condition_Variables; std::vector<LocalSystemVectorType> *mpRHS_Condition_Components; const std::vector< Variable< LocalSystemVectorType > > *mpRHS_Condition_Variables; ///@} public: ///@name Operations ///@{ /** * @brief This method initializes the pointer of the member variables */ void Initialize() { mpLHS_Element_Components = NULL; mpLHS_Element_Variables = NULL; mpRHS_Element_Components = NULL; mpRHS_Element_Variables = NULL; mpLHS_Condition_Components = NULL; mpLHS_Condition_Variables = NULL; mpRHS_Condition_Components = NULL; mpRHS_Condition_Variables = NULL; } /* Setting pointer variables */ // Elements void SetLHS_Element_Components ( std::vector<LocalSystemMatrixType>& rLHS_Element_Components ) { mpLHS_Element_Components = &rLHS_Element_Components; }; void SetLHS_Element_Variables ( const std::vector< Variable< 
LocalSystemMatrixType > >& rLHS_Element_Variables ) { mpLHS_Element_Variables = &rLHS_Element_Variables; }; void SetRHS_Element_Components ( std::vector<LocalSystemVectorType>& rRHS_Element_Components ) { mpRHS_Element_Components = &rRHS_Element_Components; }; void SetRHS_Element_Variables ( const std::vector< Variable< LocalSystemVectorType > >& rRHS_Element_Variables ) { mpRHS_Element_Variables = &rRHS_Element_Variables; }; bool Are_LHS_Element_Components_Set() { if( mpLHS_Element_Variables == NULL ) return false; else return true; }; bool Are_RHS_Element_Components_Set() { if( mpRHS_Element_Variables == NULL ) return false; else return true; }; // Conditions void SetLHS_Condition_Components ( std::vector<LocalSystemMatrixType>& rLHS_Condition_Components ) { mpLHS_Condition_Components = &rLHS_Condition_Components; }; void SetLHS_Condition_Variables ( const std::vector< Variable< LocalSystemMatrixType > >& rLHS_Condition_Variables ) { mpLHS_Condition_Variables = &rLHS_Condition_Variables; }; void SetRHS_Condition_Components ( std::vector<LocalSystemVectorType>& rRHS_Condition_Components ) { mpRHS_Condition_Components = &rRHS_Condition_Components; }; void SetRHS_Condition_Variables ( const std::vector< Variable< LocalSystemVectorType > >& rRHS_Condition_Variables ) { mpRHS_Condition_Variables = &rRHS_Condition_Variables; }; bool Are_LHS_Condition_Components_Set() { if( mpLHS_Condition_Variables == NULL ) return false; else return true; }; bool Are_RHS_Condition_Components_Set() { if( mpRHS_Condition_Variables == NULL ) return false; else return true; }; /* Getting pointer variables */ // Elements std::vector<LocalSystemMatrixType>& GetLHS_Element_Components() { return *mpLHS_Element_Components; }; const std::vector< Variable< LocalSystemMatrixType > >& GetLHS_Element_Variables() { return *mpLHS_Element_Variables; }; std::vector<LocalSystemVectorType>& GetRHS_Element_Components() { return *mpRHS_Element_Components; }; const std::vector< Variable< 
LocalSystemVectorType > >& GetRHS_Element_Variables() { return *mpRHS_Element_Variables; }; // Conditions std::vector<LocalSystemMatrixType>& GetLHS_Condition_Components() { return *mpLHS_Condition_Components; }; const std::vector< Variable< LocalSystemMatrixType > >& GetLHS_Condition_Variables() { return *mpLHS_Condition_Variables; }; std::vector<LocalSystemVectorType>& GetRHS_Condition_Components() { return *mpRHS_Condition_Components; }; const std::vector< Variable< LocalSystemVectorType > >& GetRHS_Condition_Variables() { return *mpRHS_Condition_Variables; }; ///@} }; ///@} ///@name Life Cycle ///@{ /** * @brief Default Constructor * @details Initiliazes the flags */ explicit Scheme() { mSchemeIsInitialized = false; mElementsAreInitialized = false; mConditionsAreInitialized = false; } /** Copy Constructor. */ explicit Scheme(Scheme& rOther) :mSchemeIsInitialized(rOther.mSchemeIsInitialized) ,mElementsAreInitialized(rOther.mElementsAreInitialized) ,mConditionsAreInitialized(rOther.mConditionsAreInitialized) { } /** Destructor. */ virtual ~Scheme() { } ///@} ///@name Operators ///@{ /** * @brief Clone method * @return The pointer of the cloned scheme */ virtual Pointer Clone() { return Kratos::make_shared<Scheme>(*this) ; } /** * @brief Component wise components Get method * @warning Must be defined on the derived classes * @return The local system of components */ virtual LocalSystemComponents& GetLocalSystemComponents() { KRATOS_ERROR << "Asking for Local Components to the SCHEME base class which is not component wise and not contains this member variable" << std::endl; } /** * @brief This is the place to initialize the Scheme. 
* @details This is intended to be called just once when the strategy is initialized * @param rModelPart The model part of the problem to solve */ virtual void Initialize(ModelPart& rModelPart) { KRATOS_TRY mSchemeIsInitialized = true; KRATOS_CATCH("") } /** * @brief This method returns if the scheme is initialized * @return True if initilized, false otherwise */ bool SchemeIsInitialized() { return mSchemeIsInitialized; } /** * @brief This method sets if the elements have been initilized or not (true by default) * @param ElementsAreInitializedFlag If the flag must be set to true or false */ void SetSchemeIsInitialized(bool SchemeIsInitializedFlag = true) { mSchemeIsInitialized = SchemeIsInitializedFlag; } /** * @brief This method returns if the elements are initialized * @return True if initilized, false otherwise */ bool ElementsAreInitialized() { return mElementsAreInitialized; } /** * @brief This method sets if the elements have been initilized or not (true by default) * @param ElementsAreInitializedFlag If the flag must be set to true or false */ void SetElementsAreInitialized(bool ElementsAreInitializedFlag = true) { mElementsAreInitialized = ElementsAreInitializedFlag; } /** * @brief This method returns if the conditions are initialized * @return True if initilized, false otherwise */ bool ConditionsAreInitialized() { return mConditionsAreInitialized; } /** * @brief This method sets if the conditions have been initilized or not (true by default) * @param ConditionsAreInitializedFlag If the flag must be set to true or false */ void SetConditionsAreInitialized(bool ConditionsAreInitializedFlag = true) { mConditionsAreInitialized = ConditionsAreInitializedFlag; } /** * @brief This is the place to initialize the elements. 
* @details This is intended to be called just once when the strategy is initialized * @param rModelPart The model part of the problem to solve */ virtual void InitializeElements( ModelPart& rModelPart) { KRATOS_TRY const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); i++) { auto it_elem = rModelPart.ElementsBegin() + i; it_elem->Initialize(r_current_process_info); } SetElementsAreInitialized(); KRATOS_CATCH("") } /** * @brief This is the place to initialize the conditions. * @details This is intended to be called just once when the strategy is initialized * @param rModelPart The model part of the problem to solve */ virtual void InitializeConditions(ModelPart& rModelPart) { KRATOS_TRY KRATOS_ERROR_IF_NOT(mElementsAreInitialized) << "Before initilizing Conditions, initialize Elements FIRST" << std::endl; const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); i++) { auto it_cond = rModelPart.ConditionsBegin() + i; it_cond->Initialize(r_current_process_info); } SetConditionsAreInitialized(); KRATOS_CATCH("") } /** * @brief Function called once at the beginning of each solution step. 
* @details The basic operations to be carried in there are the following: * - managing variables to be kept constant over the time step (for example time-Scheme constants depending on the actual time step) * @param rModelPart The model part of the problem to solve * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void InitializeSolutionStep( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Definition of the first element iterator const auto it_elem_begin = rModelPart.ElementsBegin(); // Initializes solution step for all of the elements #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) { auto it_elem = it_elem_begin + i; it_elem->InitializeSolutionStep(r_current_process_info); } // Definition of the first condition iterator const auto it_cond_begin = rModelPart.ConditionsBegin(); // Initializes solution step for all of the conditions #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) { auto it_cond = it_cond_begin + i; it_cond->InitializeSolutionStep(r_current_process_info); } // Definition of the first constraint iterator const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin(); // Initializes solution step for all of the constraints #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) { auto it_const = it_const_begin + i; it_const->InitializeSolutionStep(r_current_process_info); } KRATOS_CATCH("") } /** * @brief Function called once at the end of a solution step, after convergence is reached if an iterative process is needed * @param rModelPart The model part of the problem to solve * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void FinalizeSolutionStep( ModelPart& 
rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b) { KRATOS_TRY const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Definition of the first element iterator const auto it_elem_begin = rModelPart.ElementsBegin(); // Finalizes solution step for all of the elements #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) { auto it_elem = it_elem_begin + i; it_elem->FinalizeSolutionStep(r_current_process_info); } // Definition of the first condition iterator const auto it_cond_begin = rModelPart.ConditionsBegin(); // Finalizes solution step for all of the conditions #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) { auto it_cond = it_cond_begin + i; it_cond->FinalizeSolutionStep(r_current_process_info); } // Definition of the first constraint iterator const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin(); // Finalizes solution step for all of the constraints #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) { auto it_const = it_const_begin + i; it_const->FinalizeSolutionStep(r_current_process_info); } KRATOS_CATCH("") } /************************ BEGIN FRACTIONAL STEP METHODS ****************************/ /********************* TODO: DECIDE IF NECESSARY TO DEFINE *************************/ /***********************************************************************************/ // /** // * @brief Initializes solution step, to be used when system is not explicitely defined // * @details For example for fractional step strategies // * @warning Must be defined in derived classes // * @param rModelPart The model part of the problem to solve // */ // virtual void InitializeSolutionStep(ModelPart& rModelPart) // { // KRATOS_TRY // KRATOS_CATCH("") // } // // /** // * @brief Finalizes solution step, to be used when system is not explicitely defined // * @details For example for 
fractional step strategies // * @warning Must be defined in derived classes // * @param rModelPart The model part of the problem to solve // */ // virtual void FinalizeSolutionStep(ModelPart& rModelPart) // { // KRATOS_TRY // KRATOS_CATCH("") // } // // /** // * @brief Executed before each fractional step // * @warning Must be defined in derived classes // * @param rModelPart The model part of the problem to solve // */ // virtual void InitializeFractionalSolutionStep(ModelPart& rModelPart) // { // KRATOS_TRY // KRATOS_CATCH("") // } // // /** // * @brief Executed after each fractional step // * @warning Must be defined in derived classes // * @param rModelPart The model part of the problem to solve // */ // virtual void FinalizeFractionalSolutionStep(ModelPart& rModelPart) // { // KRATOS_TRY // KRATOS_CATCH("") // } /************************ END FRACTIONAL STEP METHODS ****************************/ /***********************************************************************************/ /** * @brief unction to be called when it is needed to initialize an iteration. It is designed to be called at the beginning of each non linear iteration * @note Take care: the elemental function with the same name is NOT called here. 
* @warning Must be defined in derived classes * @details The function is called in the builder for memory efficiency * @param rModelPart The model part of the problem to solve * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void InitializeNonLinIteration( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief It initializes a non-linear iteration (for an individual condition) * @warning Must be defined in derived classes * @param rCurrentElement The element to compute * @param rCurrentProcessInfo The current process info instance */ virtual void InitializeNonLinearIteration( Element::Pointer rCurrentElement, ProcessInfo& rCurrentProcessInfo ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief It initializes a non-linear iteration (for an individual condition) * @warning Must be defined in derived classes * @param rCurrentCondition The condition to compute * @param rCurrentProcessInfo The current process info instance */ virtual void InitializeNonLinearIteration( Condition::Pointer rCurrentCondition, ProcessInfo& rCurrentProcessInfo ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief Function to be called when it is needed to finalize an iteration. 
It is designed to be called at the end of each non linear iteration * @param rModelPart The model part of the problem to solve * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void FinalizeNonLinIteration( ModelPart& rModelPart, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Definition of the first element iterator const auto it_elem_begin = rModelPart.ElementsBegin(); // Finalizes non-linear iteration for all of the elements #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Elements().size()); ++i) { auto it_elem = it_elem_begin + i; it_elem->FinalizeNonLinearIteration(r_current_process_info); } // Definition of the first condition iterator const auto it_cond_begin = rModelPart.ConditionsBegin(); // Finalizes non-linear iteration for all of the conditions #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.Conditions().size()); ++i) { auto it_cond = it_cond_begin + i; it_cond->FinalizeNonLinearIteration(r_current_process_info); } // Definition of the first constraint iterator const auto it_const_begin = rModelPart.MasterSlaveConstraintsBegin(); // Finalizes non-linear iteration for all of the constraints #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.MasterSlaveConstraints().size()); ++i) { auto it_const = it_const_begin + i; it_const->FinalizeNonLinearIteration(r_current_process_info); } KRATOS_CATCH("") } /** * @brief Performing the prediction of the solution. 
* @warning Must be defined in derived classes * @param rModelPart The model part of the problem to solve * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void Predict( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief Performing the update of the solution. * @warning Must be defined in derived classes * @param rModelPart The model part of the problem to solve * @param rDofSet Set of all primary variables * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void Update( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief Functions to be called to prepare the data needed for the output of results. * @warning Must be defined in derived classes * @param rModelPart The model part of the problem to solve * @param rDofSet Set of all primary variables * @param A LHS matrix * @param Dx Incremental update of primary variables * @param b RHS Vector */ virtual void CalculateOutputData( ModelPart& rModelPart, DofsArrayType& rDofSet, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b ) { KRATOS_TRY KRATOS_CATCH("") } /** * @brief Functions that cleans the results data. * @warning Must be implemented in the derived classes */ virtual void CleanOutputData() { KRATOS_TRY KRATOS_CATCH("") } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed after the end of the solution step * @warning Must be implemented in the derived classes */ virtual void Clean() { KRATOS_TRY KRATOS_CATCH("") } /** * @brief Function to clean up "element" scratch space after each element is built. 
* @param rElement The element to compute */ virtual void CleanMemory(Element& rElement) { this->CleanMemory(Element::Pointer(&rElement)); // TODO remove this after the transition period and uncomment the following // rElement.CleanMemory(); } // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function") virtual void CleanMemory(Element::Pointer rCurrentElement) { rCurrentElement->CleanMemory(); } /** * @brief Function to clean up "condition" scratch space after each condition is built. * @param rCondition The condition to compute */ virtual void CleanMemory(Condition& rCondition) { this->CleanMemory(Condition::Pointer(&rCondition)); // TODO remove this after the transition period and uncomment the following // rCondition.CleanMemory(); } // KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function") virtual void CleanMemory(Condition::Pointer rCurrentCondition) { rCurrentCondition->CleanMemory(); } /** * @brief Liberate internal storage. * @warning Must be implemented in the derived classes */ virtual void Clear() { KRATOS_TRY KRATOS_CATCH("") } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. 
* @details Checks can be "expensive" as the function is designed * @param rModelPart The model part of the problem to solve * @return 0 all OK, 1 otherwise */ virtual int Check(const ModelPart& rModelPart) const { KRATOS_TRY const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo(); // Checks for all of the elements #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.NumberOfElements()); i++) { auto it_elem = rModelPart.ElementsBegin() + i; it_elem->Check(r_current_process_info); } // Checks for all of the conditions #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.NumberOfConditions()); i++) { auto it_cond = rModelPart.ConditionsBegin() + i; it_cond->Check(r_current_process_info); } // Checks for all of the constraints #pragma omp parallel for for(int i=0; i<static_cast<int>(rModelPart.NumberOfMasterSlaveConstraints()); i++) { auto it_constraint = rModelPart.MasterSlaveConstraintsBegin() + i; it_constraint->Check(r_current_process_info); } return 0; KRATOS_CATCH(""); } virtual int Check(ModelPart& rModelPart) { // calling the const version for backward compatibility const Scheme& r_const_this = *this; const ModelPart& r_const_model_part = rModelPart; return r_const_this.Check(r_const_model_part); } /** * @brief This function is designed to be called in the builder and solver to introduce the selected time integration scheme. * @details It "asks" the matrix needed to the element and performs the operations needed to introduce the selected time integration scheme. 
This function calculates at the same time the contribution to the LHS and to the RHS of the system
 * @param rElement The element to compute
 * @param LHS_Contribution The LHS matrix contribution
 * @param RHS_Contribution The RHS vector contribution
 * @param rEquationIdVector The ID's of the element degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateSystemContributions(
    Element& rElement,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->CalculateSystemContributions(
        Element::Pointer(&rElement),
        LHS_Contribution,
        RHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void CalculateSystemContributions(
    Element::Pointer pCurrentElement,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentElement->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief Functions totally analogous to the precedent but applied to the "condition" objects
 * @param rCondition The condition to compute
 * @param LHS_Contribution The LHS matrix contribution
 * @param RHS_Contribution The RHS vector contribution
 * @param rEquationIdVector The ID's of the condition degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateSystemContributions(
    Condition& rCondition,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->Condition_CalculateSystemContributions(
        Condition::Pointer(&rCondition),
        LHS_Contribution,
        RHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_CalculateSystemContributions(
    Condition::Pointer pCurrentCondition,
    LocalSystemMatrixType& LHS_Contribution,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentCondition->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief This function is designed to calculate just the RHS contribution
 * @param rElement The element to compute
 * @param RHS_Contribution The RHS vector contribution
 * @param rEquationIdVector The ID's of the element degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateRHSContribution(
    Element& rElement,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->Calculate_RHS_Contribution(
        Element::Pointer(&rElement),
        RHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rElement.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Calculate_RHS_Contribution(
    Element::Pointer pCurrentElement,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentElement->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief Functions totally analogous to the precedent but applied to the "condition" objects
 * @param rCondition The condition to compute
 * @param RHS_Contribution The RHS vector contribution
 * @param rEquationIdVector The ID's of the condition degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateRHSContribution(
    Condition& rCondition,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->Condition_Calculate_RHS_Contribution(
        Condition::Pointer(&rCondition),
        RHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rCondition.CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_Calculate_RHS_Contribution(
    Condition::Pointer pCurrentCondition,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentCondition->CalculateRightHandSide(RHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief This function is designed to calculate just the LHS contribution
 * @param rElement The element to compute
 * @param LHS_Contribution The LHS matrix contribution
 * @param rEquationIdVector The ID's of the element degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateLHSContribution(
    Element& rElement,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->Calculate_LHS_Contribution(
        Element::Pointer(&rElement),
        LHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rElement.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Calculate_LHS_Contribution(
    Element::Pointer pCurrentElement,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentElement->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief Functions totally analogous to the precedent but applied to the "condition" objects
 * @param rCondition The condition to compute
 * @param LHS_Contribution The LHS matrix contribution
 * @param rEquationIdVector The ID's of the condition degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void CalculateLHSContribution(
    Condition& rCondition,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& rEquationIdVector,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    // Forwards to the deprecated pointer overload during the transition period.
    this->Condition_Calculate_LHS_Contribution(
        Condition::Pointer(&rCondition),
        LHS_Contribution,
        rEquationIdVector,
        const_cast<ProcessInfo&>(rCurrentProcessInfo)
        ); // TODO remove this after the transition period and uncomment the following
    // rCondition.CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_Calculate_LHS_Contribution(
    Condition::Pointer pCurrentCondition,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentCondition->CalculateLeftHandSide(LHS_Contribution, rCurrentProcessInfo);
}

/**
 * @brief This method gets the equation id corresponding to the current element
 * @param rElement The element to compute
 * @param rEquationId The ID's of the element degrees of freedom
 * @param rCurrentProcessInfo The current process info
instance
 */
virtual void EquationId(
    const Element& rElement,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    rElement.EquationIdVector(rEquationId, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void EquationId(
    Element::Pointer pCurrentElement,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    (pCurrentElement)->EquationIdVector(EquationId, rCurrentProcessInfo);
}

/**
 * @brief Functions totally analogous to the precedent but applied to the "condition" objects
 * @param rCondition The condition to compute
 * @param rEquationId The ID's of the condition degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void EquationId(
    const Condition& rCondition,
    Element::EquationIdVectorType& rEquationId,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    rCondition.EquationIdVector(rEquationId, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void Condition_EquationId(
    Condition::Pointer pCurrentCondition,
    Element::EquationIdVectorType& EquationId,
    ProcessInfo& rCurrentProcessInfo
    )
{
    (pCurrentCondition)->EquationIdVector(EquationId, rCurrentProcessInfo);
}

/**
 * @brief Function that returns the list of Degrees of freedom to be assembled in the system for a Given element
 * @param rElement The element to compute
 * @param rDofList The list containing the element degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void GetDofList(
    const Element& rElement,
    Element::DofsVectorType& rDofList,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    rElement.GetDofList(rDofList, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void GetElementalDofList(
    Element::Pointer pCurrentElement,
    Element::DofsVectorType& ElementalDofList,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentElement->GetDofList(ElementalDofList, rCurrentProcessInfo);
}

/**
 * @brief Function that returns the list of Degrees of freedom to be assembled in the system for a Given condition
 * @param rCondition The condition to compute
 * @param rDofList The list containing the condition degrees of freedom
 * @param rCurrentProcessInfo The current process info instance
 */
virtual void GetDofList(
    const Condition& rCondition,
    Element::DofsVectorType& rDofList,
    const ProcessInfo& rCurrentProcessInfo
    )
{
    rCondition.GetDofList(rDofList, rCurrentProcessInfo);
}

// Legacy overload kept for the transition period.
// KRATOS_DEPRECATED_MESSAGE("This is legacy version, please use the other overload of this function")
virtual void GetConditionDofList(
    Condition::Pointer pCurrentCondition,
    Element::DofsVectorType& ConditionDofList,
    ProcessInfo& rCurrentProcessInfo
    )
{
    pCurrentCondition->GetDofList(ConditionDofList, rCurrentProcessInfo);
}

///@}
///@name Operations
///@{

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
virtual std::string Info() const
{
    return "Scheme";
}

/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << Info();
}

/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{
    rOStream << Info();
}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

bool mSchemeIsInitialized;      /// Flag to be used in controlling if the Scheme has been initialized or not
bool mElementsAreInitialized;   /// Flag taking in account if the elements were initialized correctly or not
bool mConditionsAreInitialized; /// Flag taking in account if the conditions were initialized correctly or not

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

///@}
}; // Class Scheme

} // namespace Kratos.

#endif /* KRATOS_SCHEME defined */
ompfor4.c
/*
 * Dynamic schedule demo: distribute loop iterations over the OpenMP team
 * with schedule(dynamic) and report which thread runs each one.
 */
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks so the example also builds and runs without -fopenmp
 * (the original called these unconditionally and failed to link). */
static int omp_get_thread_num(void)  { return 0; }
static int omp_get_num_threads(void) { return 1; }
#endif

int a[20];

/*
 * Fill a[lower], a[lower+stride], ... (up to but excluding `upper`) with
 * twice the index, printing the executing thread for each iteration.
 * Must be called from inside a parallel region: the orphaned `omp for`
 * worksharing construct splits the iterations across the team.
 */
void foo(int lower, int upper, int stride)
{
    int i;
    #pragma omp for schedule(dynamic)
    /* BUG FIX: was `for (i=lower; i>upper; i-=stride)`, which executes
     * zero iterations for the actual call foo(0,20,3). */
    for (i = lower; i < upper; i += stride) {
        a[i] = i * 2;
        printf("Iteration %2d is carried out by thread %2d\n",
               i, omp_get_thread_num());
    }
}

int main(void)
{
    #pragma omp parallel
    {
        #pragma omp single
        printf("Using %d threads.\n", omp_get_num_threads());
        foo(0, 20, 3);
    }
    return 0;
}
mttkrp.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "mttkrp.h"
#include "thd_info.h"
#include "tile.h"
#include "util.h"

#include <omp.h>

/* Number of fine-grained locks guarding rows of the output matrix.
 * NOTE(review): `locks` must be initialized with omp_init_lock() before any
 * locking kernel runs -- confirm against this module's init path. */
#define NLOCKS 1024
static omp_lock_t locks[NLOCKS];

/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/*
 * Public MTTKRP entry point: wraps the raw factor-matrix arrays in matrix_t
 * structures, sets up per-thread scratch, runs the CSF MTTKRP for `mode`
 * into `matout`, then frees the wrappers.  Returns SPLATT_SUCCESS.
 */
int splatt_mttkrp(
    splatt_idx_t const mode,
    splatt_idx_t const ncolumns,
    splatt_csf const * const tensors,
    splatt_val_t ** matrices,
    splatt_val_t * const matout,
    double const * const options)
{
  idx_t const nmodes = tensors->nmodes;

  /* fill matrix pointers */
  matrix_t * mats[MAX_NMODES+1];
  for(idx_t m=0; m < nmodes; ++m) {
    mats[m] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
    mats[m]->I = tensors->dims[m];
    mats[m]->J = ncolumns; /* BUG FIX: was `ncolumns,` -- accidental comma operator */
    mats[m]->rowmajor = 1;
    mats[m]->vals = matrices[m];
  }
  mats[MAX_NMODES] = (matrix_t *) splatt_malloc(sizeof(matrix_t));
  mats[MAX_NMODES]->I = tensors->dims[mode];
  mats[MAX_NMODES]->J = ncolumns;
  mats[MAX_NMODES]->rowmajor = 1;
  mats[MAX_NMODES]->vals = matout;

  /* Setup thread structures. + 64 bytes is to avoid false sharing. */
  idx_t const nthreads = (idx_t) options[SPLATT_OPTION_NTHREADS];
  omp_set_num_threads(nthreads);
  thd_info * thds = thd_init(nthreads, 3,
    (ncolumns * ncolumns * sizeof(val_t)) + 64,
    0,
    (nmodes * ncolumns * sizeof(val_t)) + 64);

  /* do the MTTKRP */
  mttkrp_csf(tensors, mats, mode, thds, options);

  /* cleanup */
  thd_free(thds, nthreads);
  for(idx_t m=0; m < nmodes; ++m) {
    free(mats[m]);
  }
  free(mats[MAX_NMODES]);

  return SPLATT_SUCCESS;
}

/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/* out[f] += a[f] * b[f] for all nfactors entries. */
static inline void p_add_hada(
  val_t * const restrict out,
  val_t const * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] += a[f] * b[f];
  }
}

/* out[f] += a[f] * b[f], clearing a[] as it is consumed. */
static inline void p_add_hada_clear(
  val_t * const restrict out,
  val_t * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] += a[f] * b[f];
    a[f] = 0;
  }
}

/* out[f] = a[f] * b[f] (overwrite, no accumulation). */
static inline void p_assign_hada(
  val_t * const restrict out,
  val_t const * const restrict a,
  val_t const * const restrict b,
  idx_t const nfactors)
{
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = a[f] * b[f];
  }
}

/* Scatter accumbuf into rows of leafmat for nnz range [start,end),
 * guarding each row update with one of the NLOCKS striped locks. */
static inline void p_csf_process_fiber_lock(
  val_t * const leafmat,
  val_t const * const restrict accumbuf,
  idx_t const nfactors,
  idx_t const start,
  idx_t const end,
  idx_t const * const restrict inds,
  val_t const * const restrict vals)
{
  for(idx_t jj=start; jj < end; ++jj) {
    val_t * const restrict leafrow = leafmat + (inds[jj] * nfactors);
    val_t const v = vals[jj];
    omp_set_lock(locks + (inds[jj] % NLOCKS));
    for(idx_t f=0; f < nfactors; ++f) {
      leafrow[f] += v * accumbuf[f];
    }
    omp_unset_lock(locks + (inds[jj] % NLOCKS));
  }
}

/* Same scatter as above, but without locking (caller owns the rows). */
static inline void p_csf_process_fiber_nolock(
  val_t * const leafmat,
  val_t const * const restrict accumbuf,
  idx_t const nfactors,
  idx_t const start,
  idx_t const end,
  idx_t const * const restrict inds,
  val_t const
* const restrict vals)
{
  for(idx_t jj=start; jj < end; ++jj) {
    val_t * const restrict leafrow = leafmat + (inds[jj] * nfactors);
    val_t const v = vals[jj];
    for(idx_t f=0; f < nfactors; ++f) {
      leafrow[f] += v * accumbuf[f];
    }
  }
}

/* Gather: accumulate v * leafmat-row into accumbuf for nnz range [start,end). */
static inline void p_csf_process_fiber(
  val_t * const restrict accumbuf,
  idx_t const nfactors,
  val_t const * const leafmat,
  idx_t const start,
  idx_t const end,
  idx_t const * const inds,
  val_t const * const vals)
{
  /* foreach nnz in fiber */
  for(idx_t j=start; j < end; ++j) {
    val_t const v = vals[j] ;
    val_t const * const restrict row = leafmat + (nfactors * inds[j]);
    for(idx_t f=0; f < nfactors; ++f) {
      accumbuf[f] += v * row[f];
    }
  }
}

/* DFS over the CSF subtree rooted at (init_depth, init_idx), reusing
 * precomputed per-fiber values (`vals` holds nfactors entries per fiber,
 * not raw nonzeros) and skipping the last mode entirely. */
static inline void p_propagate_up_reuse(
  val_t * const out,
  val_t * const * const buf,
  idx_t * const restrict idxstack, // jli: store the start location for each mode
  idx_t const init_depth,
  idx_t const init_idx,
  idx_t const * const * const fp,
  idx_t const * const * const fids,
  val_t const * const restrict vals,
  val_t ** mvals,
  idx_t const nmodes,
  idx_t const nfactors)
{
  /* push initial idx initialize idxstack */
  idx_t const valid_nmodes = nmodes-1; /* NOTE(review): unused local */
  idxstack[init_depth] = init_idx;
  assert(init_depth < nmodes-1);
  for(idx_t m=init_depth+1; m < nmodes-1; ++m) { // Not count the last mode.
    idxstack[m] = fp[m-1][idxstack[m-1]];
  }
  idxstack[nmodes-1] = 0; // Don't use the last mode

  /* clear out accumulation buffer */
  for(idx_t f=0; f < nfactors; ++f) {
    buf[init_depth+1][f] = 0;
  }

  while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
    /* skip to last internal mode */
    idx_t depth = nmodes - 3; // fp on mode-(nmodes-2) points to mode-(nmodes-1)

    idx_t const start = fp[depth][idxstack[depth]];
    idx_t const end = fp[depth][idxstack[depth]+1];
    // idx_t const start = idxstack[depth+1];

    val_t const * restrict fibrow = mvals[depth+1] + (fids[depth+1][start] * nfactors);
    p_assign_hada(buf[depth+1], vals+start*nfactors, fibrow, nfactors);
    for (idx_t ln_idx = start+1; ln_idx < end; ++ln_idx) {
      fibrow = mvals[depth+1] + (fids[depth+1][ln_idx] * nfactors);
      p_add_hada(buf[depth+1], vals+ln_idx*nfactors, fibrow, nfactors);
    }
    idxstack[depth+1] = end;

    if ( init_depth < nmodes-3 ) {
      /* Propagate up until we reach a node with more children to process */
      do {
        /* propagate result up and clear buffer for next sibling */
        val_t const * const restrict fibrow = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
        p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);
        ++idxstack[depth];
        --depth;
      } while(depth > init_depth && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    }
  } /* end DFS */

  /* copy to out */
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = buf[init_depth+1][f];
  }
  return;
}

/* DFS over the CSF subtree rooted at (init_depth, init_idx), accumulating
 * Hadamard products of factor rows bottom-up into `out`. */
static inline void p_propagate_up(
  val_t * const out,
  val_t * const * const buf,
  idx_t * const restrict idxstack, // jli: store the start location for each mode
  idx_t const init_depth,
  idx_t const init_idx,
  idx_t const * const * const fp,
  idx_t const * const * const fids,
  val_t const * const restrict vals,
  val_t ** mvals,
  idx_t const nmodes,
  idx_t const nfactors)
{
  /* push initial idx initialize idxstack */
  idxstack[init_depth] = init_idx;
  for(idx_t m=init_depth+1; m < nmodes; ++m) {
    idxstack[m] = fp[m-1][idxstack[m-1]];
  }
  assert(init_depth < nmodes-1);

  /* clear out accumulation
buffer */
  for(idx_t f=0; f < nfactors; ++f) {
    buf[init_depth+1][f] = 0;
  }

  while(idxstack[init_depth+1] < fp[init_depth][init_idx+1]) {
    /* skip to last internal mode */
    idx_t depth = nmodes - 2; // fp on mode-(nmodes-2) points to mode-(nmodes-1)

    /* process all nonzeros [start, end) into buf[depth]*/
    idx_t const start = fp[depth][idxstack[depth]];
    idx_t const end = fp[depth][idxstack[depth]+1];
    p_csf_process_fiber(buf[depth+1], nfactors, mvals[depth+1],
        start, end, fids[depth+1], vals);
    idxstack[depth+1] = end;

    /* exit early if there is no propagation to do... */
    if(init_depth == nmodes-2) {
      for(idx_t f=0; f < nfactors; ++f) {
        out[f] = buf[depth+1][f];
      }
      return;
    }

    /* Propagate up until we reach a node with more children to process */
    do {
      /* propagate result up and clear buffer for next sibling */
      val_t const * const restrict fibrow = mvals[depth] + (fids[depth][idxstack[depth]] * nfactors);
      p_add_hada_clear(buf[depth], buf[depth+1], fibrow, nfactors);
      ++idxstack[depth];
      --depth;
    } while(depth > init_depth && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
  } /* end DFS */

  /* copy to out */
  for(idx_t f=0; f < nfactors; ++f) {
    out[f] = buf[init_depth+1][f];
  }
}

/* 3-mode MTTKRP for the root mode of one tile; serial over slices
 * (no worksharing pragma -- tiling provides the parallel decomposition). */
static void p_csf_mttkrp_root_tiled3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[1]]->vals;
  val_t const * const bvals = mats[ct->dim_perm[2]]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        mv[r] += accumF[r] * av[r];
      }
    }
  }
}

/* 3-mode MTTKRP for the root mode; slices distributed over the OpenMP team.
 * No locks needed: each slice writes a distinct output row. */
static void p_csf_mttkrp_root3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[1]]->vals;
  val_t const * const bvals = mats[ct->dim_perm[2]]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  #pragma omp for schedule(dynamic, 16) nowait
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (sids == NULL) ?
s : sids[s];
    val_t * const restrict mv = ovals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* scale inner products by row of A and update to M */
      val_t const * const restrict av = avals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        mv[r] += accumF[r] * av[r];
      }
    }
  }
}

/* Root-mode 3-way MTTKRP that optionally stores the per-fiber partial
 * products into an intermediate reuse tensor (rcsf) for later modes.
 * degree == 0 means no rcsf is kept and the plain accumF path is used. */
static void p_csf_mttkrp_root3_reuse_adaptive(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  rcsf_seq_adaptive * const seq_rcsfs,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);
  int const degree = seq_rcsfs->n_imten;
  splatt_csf * const rcsf = seq_rcsfs->rcsfs; // zero or one rcsf.

  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[1]]->vals;
  val_t const * const bvals = mats[ct->dim_perm[2]]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  val_t * const rcsf_vals = (degree == 0) ? NULL : rcsf->pt[tile_id].vals;

  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  if (rcsf_vals != NULL) {
    #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      idx_t const fid = (sids == NULL) ? s : sids[s];
      val_t * const restrict mv = ovals + (fid * nfactors);

      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        // save intermediate values in rcsf.
        val_t * const restrict rcsf_vv = rcsf_vals + (f * nfactors);
        /* first entry of the fiber is used to initialize accumF */
        idx_t const jjfirst = fptr[f];
        val_t const vfirst = vals[jjfirst];
        val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          rcsf_vv[r] = vfirst * bv[r];
        }

        /* foreach nnz in fiber */
        for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
          val_t const v = vals[jj];
          val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
          for(idx_t r=0; r < nfactors; ++r) {
            rcsf_vv[r] += v * bv[r];
          }
        }

        /* scale inner products by row of A and update to M */
        val_t const * const restrict av = avals + (fids[f] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          mv[r] += rcsf_vv[r] * av[r];
        }
      }
    }
  } else { // no rcsf need to store.
    #pragma omp for schedule(dynamic, 16) nowait
    for(idx_t s=0; s < nslices; ++s) {
      idx_t const fid = (sids == NULL) ? s : sids[s];
      val_t * const restrict mv = ovals + (fid * nfactors);

      /* foreach fiber in slice */
      for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
        // save intermediate values in rcsf.
val_t * const restrict rcsf_vv = rcsf_vals + (f * nfactors); /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] += v * bv[r]; } } /* scale inner products by row of A and update to M */ val_t const * const restrict av = avals + (fids[f] * nfactors); for(idx_t r=0; r < nfactors; ++r) { mv[r] += accumF[r] * av[r]; } } } } return; } static void p_csf_mttkrp_internal3( splatt_csf const * const ct, idx_t const tile_id, matrix_t ** mats, thd_info * const thds) { assert(ct->nmodes == 3); val_t const * const vals = ct->pt[tile_id].vals; idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0]; idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1]; idx_t const * const restrict sids = ct->pt[tile_id].fids[0]; idx_t const * const restrict fids = ct->pt[tile_id].fids[1]; idx_t const * const restrict inds = ct->pt[tile_id].fids[2]; val_t const * const avals = mats[ct->dim_perm[0]]->vals; val_t const * const bvals = mats[ct->dim_perm[2]]->vals; val_t * const ovals = mats[MAX_NMODES]->vals; idx_t const nfactors = mats[MAX_NMODES]->J; val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0]; idx_t const nslices = ct->pt[tile_id].nfibs[0]; #pragma omp for schedule(dynamic, 16) nowait for(idx_t s=0; s < nslices; ++s) { idx_t const fid = (sids == NULL) ? 
s : sids[s]; /* root row */ val_t const * const restrict rv = avals + (fid * nfactors); /* foreach fiber in slice */ for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) { /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * nfactors); for(idx_t r=0; r < nfactors; ++r) { accumF[r] += v * bv[r]; } } /* write to fiber row */ val_t * const restrict ov = ovals + (fids[f] * nfactors); omp_set_lock(locks + (fids[f] % NLOCKS)); for(idx_t r=0; r < nfactors; ++r) { ov[r] += rv[r] * accumF[r]; } omp_unset_lock(locks + (fids[f] % NLOCKS)); } } } static void p_csf_mttkrp_internal3_reuse_adaptive( splatt_csf const * const ct, idx_t const tile_id, matrix_t ** mats, rcsf_seq_adaptive const * const seq_rcsfs, thd_info * const thds) { assert(ct->nmodes == 3); int const degree = seq_rcsfs->n_imten; splatt_csf const * const rcsf = seq_rcsfs->rcsfs; // only one rcsf assert (degree == 1); val_t const * const vals = rcsf->pt[tile_id].vals; idx_t const * const restrict sptr = rcsf->pt[tile_id].fptr[0]; idx_t const * const restrict fptr = rcsf->pt[tile_id].fptr[1]; // NULL idx_t const * const restrict sids = rcsf->pt[tile_id].fids[0]; // NULL idx_t const * const restrict fids = rcsf->pt[tile_id].fids[1]; idx_t const * const restrict inds = rcsf->pt[tile_id].fids[2]; // NULL assert (fptr == NULL && fids == NULL && inds == NULL); val_t const * const avals = mats[ct->dim_perm[0]]->vals; val_t * const ovals = mats[MAX_NMODES]->vals; idx_t const nfactors = mats[MAX_NMODES]->J; idx_t const nslices = rcsf->pt[tile_id].nfibs[0]; #pragma omp for schedule(dynamic, 16) nowait for(idx_t s=0; s < nslices; ++s) { idx_t const fid = (sids == NULL) ? 
s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* write to fiber row */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      val_t const * const rcsf_vv = vals + (f * nfactors);
      omp_set_lock(locks + (fids[f] % NLOCKS));
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * rcsf_vv[r];
      }
      omp_unset_lock(locks + (fids[f] % NLOCKS));
    }
  }
}

/* 3-mode MTTKRP for the leaf mode: build the root*middle Hadamard product
 * per fiber, then scatter scaled nonzeros into output rows under locks. */
static void p_csf_mttkrp_leaf3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[0]]->vals;
  val_t const * const bvals = mats[ct->dim_perm[1]]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  #pragma omp for schedule(dynamic, 16) nowait
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }

      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        omp_set_lock(locks + (inds[jj] % NLOCKS));
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
        omp_unset_lock(locks + (inds[jj] % NLOCKS));
      }
    }
  }
}

/* General (nmodes > 3) root-mode MTTKRP for one tile, serial over fibers;
 * dispatches to the specialized 3-mode kernel when possible. */
static void p_csf_mttkrp_root_tiled(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_root_tiled3(ct, tile_id, mats, thds);
    return;
  }

  idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  idx_t const nfibs = ct->pt[tile_id].nfibs[0];
  assert(nfibs <= mats[MAX_NMODES]->I);

  for(idx_t s=0; s < nfibs; ++s) {
    idx_t const fid = (fids[0] == NULL) ?
s : fids[0][s]; assert(fid < mats[MAX_NMODES]->I); p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids, vals, mvals, nmodes, nfactors); val_t * const restrict orow = ovals + (fid * nfactors); val_t const * const restrict obuf = buf[0]; for(idx_t f=0; f < nfactors; ++f) { orow[f] += obuf[f]; } } /* end foreach outer slice */ } static void p_csf_mttkrp_root( splatt_csf const * const ct, idx_t const tile_id, matrix_t ** mats, thd_info * const thds) { /* extract tensor structures */ idx_t const nmodes = ct->nmodes; val_t const * const vals = ct->pt[tile_id].vals; /* empty tile, just return */ if(vals == NULL) { return; } if(nmodes == 3) { p_csf_mttkrp_root3(ct, tile_id, mats, thds); return; } idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr; idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids; idx_t const nfactors = mats[0]->J; val_t * mvals[MAX_NMODES]; val_t * buf[MAX_NMODES]; idx_t idxstack[MAX_NMODES]; int const tid = omp_get_thread_num(); for(idx_t m=0; m < nmodes; ++m) { mvals[m] = mats[ct->dim_perm[m]]->vals; /* grab the next row of buf from thds */ buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m); memset(buf[m], 0, nfactors * sizeof(val_t)); } val_t * const ovals = mats[MAX_NMODES]->vals; idx_t const nfibs = ct->pt[tile_id].nfibs[0]; assert(nfibs <= mats[MAX_NMODES]->I); #pragma omp for schedule(dynamic, 16) nowait for(idx_t s=0; s < nfibs; ++s) { idx_t const fid = (fids[0] == NULL) ? 
s : fids[0][s]; assert(fid < mats[MAX_NMODES]->I); p_propagate_up(buf[0], buf, idxstack, 0, s, fp, fids, vals, mvals, nmodes, nfactors); val_t * const restrict orow = ovals + (fid * nfactors); val_t const * const restrict obuf = buf[0]; for(idx_t f=0; f < nfactors; ++f) { orow[f] += obuf[f]; } } /* end foreach outer slice */ } static void p_spt2t_add_hada ( val_t * const out_vals, idx_t const nslices, idx_t const * const in_sptr, idx_t const * const in_fids, val_t const * const in_vals, matrix_t const * const mats) { idx_t const nfactors = mats->J; val_t const * const mat_vals = mats->vals; // Loop on mode-(N-3), loop slices #pragma omp for schedule(dynamic, 16) nowait for (idx_t s=0; s<nslices; ++s) { val_t * const restrict out_row = out_vals + (nfactors * s); // Loop on mode-(N-2), loop fibers for ( idx_t f=in_sptr[s]; f<in_sptr[s+1]; ++f) { val_t * const restrict in_row = in_vals + (nfactors * f); val_t * const restrict mat_row = mat_vals + (nfactors * in_fids[f]); for(idx_t r=0; r<nfactors; ++r) { out_row[r] += in_row[r] * mat_row[r]; } } } } static void p_spt2m_add_hada ( matrix_t * const omats, idx_t const nslices, idx_t const * const sptr, idx_t const * const fids, val_t const * const rvals, matrix_t const * const imats) { idx_t const nfactors = imats->J; assert (omats->J == nfactors); assert (rcsf->dims[0] == omats->I); // point to slices, slice: mode-(N-2) * mode-(N-1) val_t const * const ivals = imats->vals; val_t * const ovals = omats->vals; /* if mode-0 is sparse, then some rows of omats are 0s. 
*/ // Loop on mode-(N-3), loop slices #pragma omp for schedule(dynamic, 16) nowait for (idx_t s=0; s<nslices; ++s) { val_t * const restrict orow = ovals + (nfactors * s); // Loop on mode-(N-2), loop fibers for ( idx_t f=sptr[s]; f<sptr[s+1]; ++f) { val_t * const restrict rrow = rvals + (nfactors * f); val_t * const restrict irow = ivals + (nfactors * fids[f]); for(idx_t r=0; r<nfactors; ++r) { orow[r] += rrow[r] * irow[r]; } } } } static void p_spttm ( // splatt_csf * const out_rcsf, val_t * const out_vals, splatt_csf const * const ct, matrix_t * const mats, idx_t const mode) { assert (mode == nmodes-1); // TODO: we only support TTM on N-1 now. assert (ct->dims[mode] == mats->I); idx_t nfactors = mats->J; idx_t const nslices = ct->pt->nfibs[mode-2]; // mode-(N-3), slice: mode-(N-2) * mode-(N-1) idx_t * const ct_sptr = ct->pt->fptr[mode-2]; // Point to mode-(N-2), slice pointer idx_t * const ct_fptr = ct->pt->fptr[mode-1]; // Point to mode-(N-1) idx_t * const ct_fids = ct->pt->fids[mode]; val_t * const ct_vals = ct->pt->vals; val_t * const mat_vals = mats->vals; #pragma omp for schedule(dynamic, 16) nowait // Loop on mode-(N-3), loop slices. The slices is much more than the first level. 
for (idx_t s=0; s<nslices; ++s) {
    // Loop on mode-(N-2), loop fibers
    for ( idx_t f=ct_sptr[s]; f<ct_sptr[s+1]; ++f) {
      /* each fiber owns its own output row, so no locks are needed */
      val_t * const restrict out_row = out_vals + (nfactors * f);
      // Loop on mode-(N-1)
      for ( idx_t j=ct_fptr[f]; j<ct_fptr[f+1]; ++j) {
        val_t const v = ct_vals[j];
        val_t * const restrict mat_row = mat_vals + (nfactors * ct_fids[j]);
        for(idx_t r=0; r<nfactors; ++r) {
          out_row[r] += v * mat_row[r];
        }
      }
    }
  }
}


/*
 * Root-mode MTTKRP that memoizes intermediate results ("RCSFs") for later
 * reuse: one SpTTM on the last mode followed by a chain of Hadamard
 * reductions up the CSF tree, ending in a reduction into the output matrix.
 * Two paths: degree == nmodes-2 stores every intermediate RCSF; otherwise
 * only `degree` of them are stored, starting at mode begin_imten, and the
 * remaining levels are reduced through temporary buffers.
 */
static void p_csf_mttkrp_root_genreuse_adaptive(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  rcsf_seq_adaptive * const seq_rcsfs,
  thd_info * const thds)
{
  idx_t const degree = seq_rcsfs->n_imten;       /* # of stored RCSFs */
  idx_t const begin_imten = seq_rcsfs->begin_imten;
  assert (degree == ct->nmodes-2);

  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;
  /* empty tile, just return */
  if(vals == NULL) {
    return;
  }
  if(nmodes == 3) {
    p_csf_mttkrp_root3_reuse_adaptive(ct, tile_id, mats, seq_rcsfs, thds); // degree = 1 or 0 here
    return;
  }
  assert (degree >= 1);

  splatt_csf * const rcsfs = seq_rcsfs->rcsfs;
  idx_t const nfactors = mats[0]->J;

  if ( degree == nmodes-2) { // Store all intermediate RCSFs
    /* NOTE(review): spttm_timer/hada_timer are declared but never started
     * or stopped here — presumably leftover instrumentation; confirm. */
    sp_timer_t tmptime, spttm_timer, hada_timer;

    /* last mode: SpTTM writes directly into RCSF[0] */
    p_spttm ((rcsfs+0)->pt[tile_id].vals, ct, mats[ct->dim_perm[nmodes-1]], nmodes-1);

    /* modes nmodes-2 .. 2: Hadamard-reduce RCSF[rloc-1] into RCSF[rloc] */
    for (idx_t m=nmodes-2; m>=2; --m) {
      idx_t rloc = nmodes-1-m; // rcsf location is different from computation order.
      matrix_t * hmat = mats[ct->dim_perm[m]];
      idx_t const in_nmodes = (rcsfs+rloc-1)->nmodes;
      idx_t nslices = (rcsfs+rloc-1)->pt[tile_id].nfibs[in_nmodes-3];
      idx_t const * in_sptr = (rcsfs+rloc-1)->pt[tile_id].fptr[in_nmodes-3];
      idx_t const * in_fids = (rcsfs+rloc-1)->pt[tile_id].fids[in_nmodes-2];
      val_t const * in_vals = (rcsfs+rloc-1)->pt[tile_id].vals;
      p_spt2t_add_hada ((rcsfs+rloc)->pt[tile_id].vals, nslices, in_sptr, in_fids, in_vals, hmat);
    }
    assert ((rcsfs+nmodes-3)->nmodes == 3);

    /* final reduction into the output matrix */
    idx_t nslices = ct->pt[tile_id].nfibs[0];
    idx_t const * in_sptr = ct->pt[tile_id].fptr[0];
    idx_t const * in_fids = ct->pt[tile_id].fids[1];
    p_spt2m_add_hada (mats[MAX_NMODES], nslices, in_sptr, in_fids,
        (rcsfs+degree-1)->pt[tile_id].vals, mats[ct->dim_perm[1]]);

  } else { // Only store useful RCSFs
    sp_timer_t tmptime, spttm_timer, hada_timer;
    timer_fstart(&tmptime);

    /* scratch buffers sized for the two deepest fiber levels */
    idx_t const max_nvals = ct->pt[tile_id].nfibs[nmodes-2] * nfactors;
    val_t * tmp_vals = (val_t *)splatt_malloc (max_nvals * sizeof(val_t));
    memset (tmp_vals, 0, max_nvals * sizeof(val_t));
    idx_t const max_nvals_2 = ct->pt[tile_id].nfibs[nmodes-3] * nfactors;
    val_t * tmp_vals_2 = (val_t *)splatt_malloc (max_nvals_2 * sizeof(val_t));
    memset (tmp_vals_2, 0, max_nvals_2 * sizeof(val_t));

    /* mode-(nmodes-1), do SpTTM.
     * Not be saved in RCSF in TWOMODE case. */
    p_spttm (tmp_vals, ct, mats[ct->dim_perm[nmodes-1]], nmodes-1);

    /* mode-(nmodes-2), ... , rmodes[0]+1, do a sequence of Hada-Reduction.
     * The resulting values still don't be saved in TWOMODE case. */
    for (idx_t m=nmodes-2; m>begin_imten; --m) {
      memset(tmp_vals_2, 0, ct->pt[tile_id].nfibs[m-1]*nfactors * sizeof(val_t));
      matrix_t * hmat = mats[ct->dim_perm[m]];
      idx_t nslices = ct->pt[tile_id].nfibs[m-1];
      idx_t const * in_sptr = ct->pt[tile_id].fptr[m-1];
      idx_t const * in_fids = ct->pt[tile_id].fids[m];
      p_spt2t_add_hada (tmp_vals_2, nslices, in_sptr, in_fids, tmp_vals, hmat);
      /* ping-pong: result becomes the input of the next level */
      memcpy(tmp_vals, tmp_vals_2, ct->pt[tile_id].nfibs[m-1]*nfactors * sizeof(val_t));
    }

    /* mode-rmodes[0], do a Hada-Reduction.
     * The resulting values are saved as RCSF[0]. */
    matrix_t * hmat = mats[ct->dim_perm[begin_imten]];
    idx_t nslices = ct->pt[tile_id].nfibs[begin_imten-1];
    idx_t const * in_sptr = ct->pt[tile_id].fptr[begin_imten-1];
    idx_t const * in_fids = ct->pt[tile_id].fids[begin_imten];
    p_spt2t_add_hada (rcsfs->pt[tile_id].vals, nslices, in_sptr, in_fids, tmp_vals, hmat);

    /* mode-(rmodes[1]), ..., (rmodes[degree-1]), do a sequence of Hada-Reduction.
     * The resulting values are saved as RCSF[1], ... , RCSF[degree-1]. */
    for (idx_t rloc=1; rloc<degree; ++rloc) {
      idx_t mloc = begin_imten - rloc;
      matrix_t * hmat = mats[ct->dim_perm[mloc]];
      idx_t const in_nmodes = (rcsfs+rloc-1)->nmodes;
      idx_t nslices = (rcsfs+rloc-1)->pt[tile_id].nfibs[in_nmodes-3];
      idx_t const * in_sptr = (rcsfs+rloc-1)->pt[tile_id].fptr[in_nmodes-3];
      idx_t const * in_fids = (rcsfs+rloc-1)->pt[tile_id].fids[in_nmodes-2];
      val_t const * in_vals = (rcsfs+rloc-1)->pt[tile_id].vals;
      p_spt2t_add_hada ((rcsfs+rloc)->pt[tile_id].vals, nslices, in_sptr, in_fids, in_vals, hmat);
    }

    /* if degree is partial. We cannot store all useful intermediate RCSFs. */
    /* mode-(rmodes[degree-1]-1), do a sequence of Hada-Reduction.
     * The resulting values still don't be saved in TWOMODE case. */
    idx_t cur_mode = begin_imten - degree;
    if (cur_mode > 1) {
      /* continue reducing the unstored levels through tmp_vals */
      hmat = mats[ct->dim_perm[cur_mode]];
      nslices = ct->pt[tile_id].nfibs[cur_mode-1];
      in_sptr = ct->pt[tile_id].fptr[cur_mode-1];
      in_fids = ct->pt[tile_id].fids[cur_mode];
      memset(tmp_vals, 0, nslices*nfactors * sizeof(val_t));
      p_spt2t_add_hada (tmp_vals, nslices, in_sptr, in_fids, (rcsfs+degree-1)->pt[tile_id].vals, hmat);

      for (idx_t m=cur_mode-1; m>1; --m) { // Last one is one mode-2
        memset(tmp_vals_2, 0, ct->pt[tile_id].nfibs[m-1]*nfactors * sizeof(val_t));
        matrix_t * hmat = mats[ct->dim_perm[m]];
        idx_t nslices = ct->pt[tile_id].nfibs[m-1];
        idx_t const * in_sptr = ct->pt[tile_id].fptr[m-1];
        idx_t const * in_fids = ct->pt[tile_id].fids[m];
        p_spt2t_add_hada (tmp_vals_2, nslices, in_sptr, in_fids, tmp_vals, hmat);
        memcpy(tmp_vals, tmp_vals_2, ct->pt[tile_id].nfibs[m-1]*nfactors * sizeof(val_t));
      }

      /* Last Hadamard-reduction */
      nslices = ct->pt[tile_id].nfibs[0];
      in_sptr = ct->pt[tile_id].fptr[0];
      in_fids = ct->pt[tile_id].fids[1];
      p_spt2m_add_hada (mats[MAX_NMODES], nslices, in_sptr, in_fids, tmp_vals, mats[ct->dim_perm[1]]);
    } else {
      /* Last Hadamard-reduction */
      nslices = ct->pt[tile_id].nfibs[0];
      in_sptr = ct->pt[tile_id].fptr[0];
      in_fids = ct->pt[tile_id].fids[1];
      p_spt2m_add_hada (mats[MAX_NMODES], nslices, in_sptr, in_fids,
          (rcsfs+degree-1)->pt[tile_id].vals, mats[ct->dim_perm[1]]);
    }

    splatt_free(tmp_vals);
    splatt_free(tmp_vals_2);
  }
}


/*
 * MTTKRP for the leaf mode of a 3-mode CSF, one dense tile: same scheme
 * as p_csf_mttkrp_leaf3 but without omp-for or locks — the caller
 * distributes whole tiles to threads.
 */
static void p_csf_mttkrp_leaf_tiled3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[0]]->vals;
  val_t const
* const bvals = mats[ct->dim_perm[1]]->vals;
  val_t * const ovals = mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  /* per-thread scratch row of length nfactors */
  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  /* no omp-for / no locks: each thread owns the whole tile */
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* fill fiber with hada */
      val_t const * const restrict av = bvals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = rv[r] * av[r];
      }

      /* foreach nnz in fiber, scale with hada and write to ovals */
      for(idx_t jj=fptr[f]; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t * const restrict ov = ovals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          ov[r] += v * accumF[r];
        }
      }
    }
  }
}


/*
 * Leaf-mode MTTKRP for one dense tile, general nmodes > 3 path: DFS over
 * the CSF tree, pushing Hadamard products down buf[] and flushing each
 * fiber's nonzeros with the lock-free fiber kernel (tiles partition the
 * output among threads).
 */
static void p_csf_mttkrp_leaf_tiled(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_leaf_tiled3(ct, tile_id, mats, thds);
    return;
  }

  /* extract tensor structures */
  idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];     /* one accumulation row per tree level */
  idx_t idxstack[MAX_NMODES];  /* DFS position at each level */

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
  }

  /* foreach outer slice */
  idx_t const nouter = ct->pt[tile_id].nfibs[0];
  for(idx_t s=0; s < nouter; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow = mvals[depth+1]
            + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_nolock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}


/*
 * Leaf-mode MTTKRP, untiled, general nmodes > 3 path: same DFS as the
 * tiled variant, but slices are shared via omp-for and the fiber flush
 * uses the locked kernel because output rows can collide across threads.
 */
static void p_csf_mttkrp_leaf(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  /* extract tensor structures */
  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const nmodes = ct->nmodes;

  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_leaf3(ct, tile_id, mats, thds);
    return;
  }

  idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  #pragma omp for schedule(dynamic, 16) nowait
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;

    /* clear out stale data */
    for(idx_t m=1; m < nmodes-1; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    val_t * const rootbuf = buf[0];
    for(idx_t f=0; f < nfactors; ++f) {
      rootbuf[f] = rootrow[f];
    }

    idx_t depth = 0;
    idx_t const outer_end = fp[0][s+1];
    while(idxstack[1] < outer_end) {
      /* move down to an nnz node */
      for(; depth < nmodes-2; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow = mvals[depth+1]
            + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* process all nonzeros [start, end) */
      idx_t const start = fp[depth][idxstack[depth]];
      idx_t const end = fp[depth][idxstack[depth]+1];
      p_csf_process_fiber_lock(mats[MAX_NMODES]->vals, buf[depth],
          nfactors, start, end, fids[depth+1], vals);

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}


/*
 * MTTKRP for the middle mode of a 3-mode CSF, one dense tile: accumF
 * gathers v * (third-mode row) over a fiber's nonzeros, then the fiber's
 * output row gets (root row) .* accumF. Lock-free (tiled).
 */
static void p_csf_mttkrp_internal_tiled3(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  thd_info * const thds)
{
  assert(ct->nmodes == 3);

  val_t const * const vals = ct->pt[tile_id].vals;
  idx_t const * const restrict sptr = ct->pt[tile_id].fptr[0];
  idx_t const * const restrict fptr = ct->pt[tile_id].fptr[1];
  idx_t const * const restrict sids = ct->pt[tile_id].fids[0];
  idx_t const * const restrict fids = ct->pt[tile_id].fids[1];
  idx_t const * const restrict inds = ct->pt[tile_id].fids[2];

  val_t const * const avals = mats[ct->dim_perm[0]]->vals;
  val_t const * const bvals = mats[ct->dim_perm[2]]->vals;
  val_t * const ovals =
mats[MAX_NMODES]->vals;
  idx_t const nfactors = mats[MAX_NMODES]->J;

  /* per-thread scratch row of length nfactors */
  val_t * const restrict accumF = (val_t *) thds[omp_get_thread_num()].scratch[0];

  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  /* no omp-for / no locks: each thread owns the whole tile */
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (sids == NULL) ? s : sids[s];

    /* root row */
    val_t const * const restrict rv = avals + (fid * nfactors);

    /* foreach fiber in slice */
    for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) {
      /* first entry of the fiber is used to initialize accumF */
      idx_t const jjfirst = fptr[f];
      val_t const vfirst = vals[jjfirst];
      val_t const * const restrict bv = bvals + (inds[jjfirst] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        accumF[r] = vfirst * bv[r];
      }

      /* foreach nnz in fiber */
      for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) {
        val_t const v = vals[jj];
        val_t const * const restrict bv = bvals + (inds[jj] * nfactors);
        for(idx_t r=0; r < nfactors; ++r) {
          accumF[r] += v * bv[r];
        }
      }

      /* write to fiber row */
      val_t * const restrict ov = ovals + (fids[f] * nfactors);
      for(idx_t r=0; r < nfactors; ++r) {
        ov[r] += rv[r] * accumF[r];
      }
    }
  }
}


/*
 * Internal-mode MTTKRP for one dense tile, general nmodes > 3 path.
 * DFS down to the output mode's depth (`outdepth`), pushing Hadamard
 * products down buf[]; at outdepth, propagate the subtree product up and
 * combine into the output row. Lock-free because tiles partition the work.
 */
static void p_csf_mttkrp_internal_tiled(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_internal_tiled3(ct, tile_id, mats, thds);
    return;
  }

  idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t outdepth = csf_mode_depth(mode, ct->dim_perm, nmodes);

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow = mvals[depth+1]
            + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}


/*
 * Internal-mode MTTKRP, untiled: same DFS as the tiled variant, but slices
 * are shared via omp-for and output rows are guarded by striped locks.
 */
static void p_csf_mttkrp_internal(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds)
{
  /* extract tensor structures */
  idx_t const nmodes = ct->nmodes;
  val_t const * const vals = ct->pt[tile_id].vals;

  /* pass empty tiles */
  if(vals == NULL) {
    return;
  }

  if(nmodes == 3) {
    p_csf_mttkrp_internal3(ct, tile_id, mats, thds);
    return;
  }

  idx_t const * const * const restrict fp = (idx_t const * const *) ct->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) ct->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  /* find out which level in the tree this is */
  idx_t outdepth = csf_mode_depth(mode, ct->dim_perm, nmodes);

  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];
  idx_t idxstack[MAX_NMODES];

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice */
  idx_t const nslices = ct->pt[tile_id].nfibs[0];
  #pragma omp for schedule(dynamic, 16) nowait
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];

    /* push outer slice and fill stack */
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* fill first buf */
    val_t const * const restrict rootrow = mvals[0] + (fid*nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    /* process entire subtree */
    idx_t depth = 0;
    while(idxstack[1] < fp[0][s+1]) {
      /* propagate values down to outdepth-1 */
      for(; depth < outdepth; ++depth) {
        val_t const * const restrict drow = mvals[depth+1]
            + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];

      /* propagate value up to buf[outdepth] */
      p_propagate_up(buf[outdepth], buf, idxstack, outdepth,idxstack[outdepth],
          fp, fids, vals, mvals, nmodes, nfactors);

      val_t * const restrict outbuf = ovals + (noderow * nfactors);
      omp_set_lock(locks + (noderow % NLOCKS));
      p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
      omp_unset_lock(locks + (noderow % NLOCKS));

      /* backtrack to next unfinished node */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end foreach outer slice */
}


/*
 * Internal-mode MTTKRP that reuses memoized intermediate RCSFs: pick the
 * deepest stored RCSF whose mode order matches the current MTTKRP, then
 * run the internal-mode DFS over that (shorter) tree instead of the full
 * tensor.
 */
static void p_csf_mttkrp_internal_reuse_adaptive(
  splatt_csf const * const ct,
  idx_t const tile_id,
  matrix_t ** mats,
  rcsf_seq_adaptive const * const seq_rcsfs,
  idx_t const mode,
  thd_info * const thds)
{
  /* extract tensor structures */
  idx_t const nmodes_ct = ct->nmodes;
  /* pass empty tiles */
  if(ct->pt[tile_id].vals == NULL) {
    return;
  }
  if(nmodes_ct == 3) {
    p_csf_mttkrp_internal3_reuse_adaptive(ct, tile_id, mats, seq_rcsfs, thds);
    return;
  }

  // nmodes > 3
  idx_t const degree = seq_rcsfs->n_imten;
  idx_t const begin_imten = seq_rcsfs->begin_imten;
  splatt_csf const * const rcsfs = seq_rcsfs->rcsfs;

  // rdims: saved rcsfs. cur_rdims: needed rcsfs.
  // Compare the two to get in_deg, reused rcsfs, and the rest modes need to process.
  // Update rdims to cur_rdims.
  // reuse the results rcsfs[0, ..., indeg-1].
  // modes used in the current MTTKRP, using mode location in CSF tree to represent.
  idx_t * cur_rdims = (idx_t *)splatt_malloc( (nmodes_ct-1) * sizeof(idx_t));

  /* find out which level in the tree this is */
  idx_t outdepth = csf_mode_depth(mode, ct->dim_perm, nmodes_ct);
  rcsf_reverse_mode_order(begin_imten, outdepth, cur_rdims);

  //the first entry shows the reused rcsfs.
  idx_t indeg = 0;
  for (idx_t i=0; i<degree; ++i) {
    if ( begin_imten - i == cur_rdims[i] )
      ++ indeg;
  }
  /* NOTE(review): allocated with splatt_malloc but released with plain
   * free(); elsewhere splatt_free() is used — confirm splatt_malloc is a
   * thin malloc wrapper. */
  free(cur_rdims);

  // Don't write back to rcsf, write in buf
  splatt_csf const * const reuse_rcsf = rcsfs + indeg - 1; // The actual reused rcsf, which is the last possible one.
  idx_t const nmodes = reuse_rcsf->nmodes; // nmodes of reusef_rcsf
  idx_t const valid_nmodes = nmodes-1; // the last mode is for the vector with the length of nfactors.
  assert (valid_nmodes >= 2);

  /* pass empty tiles */
  if(reuse_rcsf->pt[tile_id].vals == NULL) { // TODO: may have problem when tiling.
return;
  }

  val_t const * const vals = reuse_rcsf->pt[tile_id].vals;
  idx_t const * const * const restrict fp = (idx_t const * const *) reuse_rcsf->pt[tile_id].fptr;
  idx_t const * const * const restrict fids = (idx_t const * const *) reuse_rcsf->pt[tile_id].fids;
  idx_t const nfactors = mats[0]->J;

  // TODO: allocate too much more space.
  val_t * mvals[MAX_NMODES];
  val_t * buf[MAX_NMODES];     /* one accumulation row per tree level */
  idx_t idxstack[MAX_NMODES];  /* DFS position at each level */

  int const tid = omp_get_thread_num();
  for(idx_t m=0; m < nmodes_ct; ++m) {
    mvals[m] = mats[ct->dim_perm[m]]->vals;
    /* grab the next row of buf from thds */
    buf[m] = ((val_t *) thds[tid].scratch[2]) + (nfactors * m);
    memset(buf[m], 0, nfactors * sizeof(val_t));
  }

  val_t * const ovals = mats[MAX_NMODES]->vals;

  /* foreach outer slice */
  idx_t const nslices = reuse_rcsf->pt[tile_id].nfibs[0]; // begin from root
  #pragma omp for schedule(dynamic, 16) nowait
  for(idx_t s=0; s < nslices; ++s) {
    idx_t const fid = (fids[0] == NULL) ? s : fids[0][s];
    idxstack[0] = s;
    for(idx_t m=1; m <= outdepth; ++m) {
      idxstack[m] = fp[m-1][idxstack[m-1]];
    }

    /* first buf will always just be a matrix row */
    val_t const * const restrict rootrow = mvals[0] + (fid * nfactors);
    for(idx_t f=0; f < nfactors; ++f) {
      buf[0][f] = rootrow[f];
    }

    idx_t depth = 0; // current depth
    // Each thread executes its own slice.
    while(idxstack[1] < fp[0][s+1]) {
      /* move down to an nnz node */
      for(; depth < outdepth; ++depth) {
        /* propogate buf down */
        val_t const * const restrict drow = mvals[depth+1]
            + (fids[depth+1][idxstack[depth+1]] * nfactors);
        p_assign_hada(buf[depth+1], buf[depth], drow, nfactors);
      }
      // after the loop, depth == outdepth now.

      /* write to output and clear buf[outdepth] for next subtree */
      idx_t const noderow = fids[outdepth][idxstack[outdepth]];
      val_t * const restrict outbuf = ovals + (noderow * nfactors);

      /* propagate value up to buf[outdepth] */
      if (outdepth < nmodes - 2) {
        /* still above the RCSF's value level: fold the subtree up first */
        p_propagate_up_reuse(buf[outdepth], buf, idxstack, outdepth,
            idxstack[outdepth], fp, fids, vals, mvals, nmodes, nfactors);
        omp_set_lock(locks + (noderow % NLOCKS));
        p_add_hada_clear(outbuf, buf[outdepth], buf[outdepth-1], nfactors);
        omp_unset_lock(locks + (noderow % NLOCKS));
      } else if (outdepth == nmodes -2) {
        /* at the value level: the memoized row is consumed directly */
        omp_set_lock(locks + (noderow % NLOCKS));
        p_add_hada(outbuf, vals+idxstack[outdepth]*nfactors, buf[outdepth-1], nfactors);
        omp_unset_lock(locks + (noderow % NLOCKS));
      } else {
        printf("ERROR for outdepth.\n");
      }

      /* now move back up to the next unprocessed child */
      do {
        ++idxstack[depth];
        --depth;
      } while(depth > 0 && idxstack[depth+1] == fp[depth][idxstack[depth]+1]);
    } /* end DFS */
  } /* end outer slice loop */
}


/* determine which function to call */
static void p_root_decide(
  splatt_csf const * const tensor,
  matrix_t ** mats,
  idx_t const mode,
  thd_info * const thds,
  double const * const opts)
{
  idx_t const nmodes = tensor->nmodes;
  #pragma omp parallel
  {
    timer_start(&thds[omp_get_thread_num()].ttime);
    /* tile id */
    idx_t tid = 0;
    switch(tensor->which_tile) {
    case SPLATT_NOTILE:
      p_csf_mttkrp_root(tensor, 0, mats, thds);
      break;
    case SPLATT_DENSETILE:
      /* this mode may not be tiled due to minimum tiling depth */
      if(opts[SPLATT_OPTION_TILEDEPTH] > 0) {
        for(idx_t t=0; t < tensor->ntiles; ++t) {
          p_csf_mttkrp_root(tensor, t, mats, thds);
          /* root kernel is lock-free: threads must finish a tile together */
          #pragma omp barrier
        }
      } else {
        /* distribute tiles to threads */
        #pragma omp for schedule(dynamic, 1) nowait
        for(idx_t t=0; t < tensor->tile_dims[mode]; ++t) {
          tid = get_next_tileid(TILE_BEGIN, tensor->tile_dims, nmodes, mode, t);
          while(tid != TILE_END) {
            p_csf_mttkrp_root_tiled(tensor, tid, mats, thds);
            tid = get_next_tileid(tid, tensor->tile_dims, nmodes, mode, t);
          }
        }
      }
      break;

    /*
XXX */ case SPLATT_SYNCTILE: break; case SPLATT_COOPTILE: break; } timer_stop(&thds[omp_get_thread_num()].ttime); } /* end omp parallel */ } static void p_root_decide_genreuse_adaptive( splatt_csf const * const tensor, matrix_t ** mats, rcsf_seq_adaptive * const seq_rcsfs, idx_t const mode, thd_info * const thds, double const * const opts) { #pragma omp parallel { timer_start(&thds[omp_get_thread_num()].ttime); assert(tensor->which_tile == SPLATT_NOTILE); p_csf_mttkrp_root_genreuse_adaptive(tensor, 0, mats, seq_rcsfs, thds); timer_stop(&thds[omp_get_thread_num()].ttime); } /* end omp parallel */ } static void p_leaf_decide( splatt_csf const * const tensor, matrix_t ** mats, idx_t const mode, thd_info * const thds, double const * const opts) { idx_t const nmodes = tensor->nmodes; idx_t const depth = nmodes - 1; #pragma omp parallel { timer_start(&thds[omp_get_thread_num()].ttime); /* tile id */ idx_t tid = 0; switch(tensor->which_tile) { case SPLATT_NOTILE: p_csf_mttkrp_leaf(tensor, 0, mats, thds); break; case SPLATT_DENSETILE: /* this mode may not be tiled due to minimum tiling depth */ if(opts[SPLATT_OPTION_TILEDEPTH] > depth) { for(idx_t t=0; t < tensor->ntiles; ++t) { p_csf_mttkrp_leaf(tensor, 0, mats, thds); } } else { // #pragma omp for schedule(dynamic, 1) nowait for(idx_t t=0; t < tensor->tile_dims[mode]; ++t) { tid = get_next_tileid(TILE_BEGIN, tensor->tile_dims, nmodes, mode, t); while(tid != TILE_END) { p_csf_mttkrp_leaf_tiled(tensor, tid, mats, thds); tid = get_next_tileid(tid, tensor->tile_dims, nmodes, mode, t); } } } break; /* XXX */ case SPLATT_SYNCTILE: break; case SPLATT_COOPTILE: break; } timer_stop(&thds[omp_get_thread_num()].ttime); } /* end omp parallel */ } static void p_intl_decide( splatt_csf const * const tensor, matrix_t ** mats, idx_t const mode, thd_info * const thds, double const * const opts) { idx_t const nmodes = tensor->nmodes; idx_t const depth = csf_mode_depth(mode, tensor->dim_perm, nmodes); #pragma omp parallel { 
timer_start(&thds[omp_get_thread_num()].ttime); /* tile id */ idx_t tid = 0; switch(tensor->which_tile) { case SPLATT_NOTILE: p_csf_mttkrp_internal(tensor, 0, mats, mode, thds); break; case SPLATT_DENSETILE: /* this mode may not be tiled due to minimum tiling depth */ if(opts[SPLATT_OPTION_TILEDEPTH] > depth) { for(idx_t t=0; t < tensor->ntiles; ++t) { p_csf_mttkrp_internal(tensor, t, mats, mode, thds); } } else { #pragma omp for schedule(dynamic, 1) nowait for(idx_t t=0; t < tensor->tile_dims[mode]; ++t) { tid = get_next_tileid(TILE_BEGIN, tensor->tile_dims, nmodes, mode, t); while(tid != TILE_END) { p_csf_mttkrp_internal_tiled(tensor, tid, mats, mode, thds); tid = get_next_tileid(tid, tensor->tile_dims, nmodes, mode, t); } } } break; /* XXX */ case SPLATT_SYNCTILE: break; case SPLATT_COOPTILE: break; } timer_stop(&thds[omp_get_thread_num()].ttime); } /* end omp parallel */ } static void p_intl_decide_reuse_adaptive( splatt_csf const * const tensor, matrix_t ** mats, rcsf_seq_adaptive * const seq_rcsfs, idx_t const mode, thd_info * const thds, double const * const opts) { idx_t const nmodes = tensor->nmodes; idx_t const depth = csf_mode_depth(mode, tensor->dim_perm, nmodes); #pragma omp parallel { timer_start(&thds[omp_get_thread_num()].ttime); assert(tensor->which_tile == SPLATT_NOTILE); p_csf_mttkrp_internal_reuse_adaptive(tensor, 0, mats, seq_rcsfs, mode, thds); timer_stop(&thds[omp_get_thread_num()].ttime); } /* end omp parallel */ } /****************************************************************************** * PUBLIC FUNCTIONS *****************************************************************************/ void mttkrp_csf( splatt_csf const * const tensors, matrix_t ** mats, idx_t const mode, thd_info * const thds, double const * const opts) { /* clear output matrix */ matrix_t * const M = mats[MAX_NMODES]; M->I = tensors[0].dims[mode]; memset(M->vals, 0, M->I * M->J * sizeof(val_t)); omp_set_num_threads(opts[SPLATT_OPTION_NTHREADS]); idx_t nmodes = 
tensors[0].nmodes; /* find out which level in the tree this is */ idx_t outdepth = MAX_NMODES; /* choose which MTTKRP function to use */ splatt_csf_type which = opts[SPLATT_OPTION_CSF_ALLOC]; switch(which) { case SPLATT_CSF_ONEMODE: outdepth = csf_mode_depth(mode, tensors[0].dim_perm, nmodes); if(outdepth == 0) { p_root_decide(tensors+0, mats, mode, thds, opts); } else if(outdepth == nmodes - 1) { p_leaf_decide(tensors+0, mats, mode, thds, opts); } else { p_intl_decide(tensors+0, mats, mode, thds, opts); } break; case SPLATT_CSF_TWOMODE: /* longest mode handled via second tensor's root */ if(mode == tensors[0].dim_perm[nmodes-1]) { p_root_decide(tensors+1, mats, mode, thds, opts); /* root and internal modes are handled via first tensor */ } else { outdepth = csf_mode_depth(mode, tensors[0].dim_perm, nmodes); if(outdepth == 0) { p_root_decide(tensors+0, mats, mode, thds, opts); } else { p_intl_decide(tensors+0, mats, mode, thds, opts); } } break; case SPLATT_CSF_ALLMODE: p_root_decide(tensors+mode, mats, mode, thds, opts); break; } } void decide_use_csfs( idx_t const nmodes, group_properties * const grp_prop, int const n_grp, idx_t const n_csf, idx_t * use_csfs, // The location of csf in cs. idx_t * use_tags) //0: root mttkrp; 1: reuse mttkrp; 2: intern or leaf mttkrp with recompute. 
{ assert(n_csf <= n_grp); idx_t * grp_modes = (idx_t *)splatt_malloc(n_grp * sizeof(idx_t)); memset(use_tags, 0, nmodes * sizeof(idx_t)); idx_t csf_idx = 0; idx_t mode; for(idx_t g=0; g<n_grp; ++g) { grp_modes[g] = grp_prop[g].memo_mode; } if(n_csf == n_grp) { for(idx_t g=0; g<n_grp-1; ++g) { mode = grp_modes[g]; use_csfs[mode] = g; use_tags[mode] = 0; for(idx_t rm=mode+1; rm<grp_modes[g+1]; ++rm) { use_csfs[rm] = g; use_tags[rm] = 1; } } mode = grp_modes[n_grp-1]; use_csfs[mode] = n_grp-1; use_tags[mode] = 0; for(idx_t rm=mode+1; rm<nmodes; ++rm) { use_csfs[rm] = n_grp-1; use_tags[rm] = 1; } /* n_csf != n_grp */ } else { idx_t * csf_modes = (idx_t *)splatt_malloc(n_csf * sizeof(idx_t)); for(idx_t g=0; g<n_grp; ++g) { if(grp_prop[g].n_imten > 0) { csf_modes[csf_idx] = grp_prop[g].memo_mode; ++ csf_idx; } } assert(csf_idx == n_csf); double mode_loc_csf; for(idx_t g=0; g<n_grp-1; ++g) { mode = grp_modes[g]; mode_loc_csf = locate_ind_special(csf_modes, n_csf, mode, nmodes); if(mode_loc_csf >= 0) { if(mode_loc_csf - (idx_t)mode_loc_csf == 0) { use_csfs[mode] = (idx_t)mode_loc_csf; use_tags[mode] = 0; for(idx_t rm=mode+1; rm<grp_modes[g+1]; ++rm) { use_csfs[rm] = use_csfs[mode]; use_tags[rm] = 1; } } else { use_csfs[mode] = (idx_t)mode_loc_csf; use_tags[mode] = 2; for(idx_t rm=mode+1; rm<grp_modes[g+1]; ++rm) { use_csfs[rm] = use_csfs[mode]; use_tags[rm] = 2; } } } mode = grp_modes[n_grp-1]; mode_loc_csf = locate_ind_special(csf_modes, n_csf, mode, nmodes); if(mode_loc_csf >= 0) { if(mode_loc_csf - (idx_t)mode_loc_csf == 0) { use_csfs[mode] = (idx_t)mode_loc_csf; use_tags[mode] = 0; for(idx_t rm=mode+1; rm<nmodes; ++rm) { use_csfs[rm] = use_csfs[mode]; use_tags[rm] = 1; } } else { use_csfs[mode] = (idx_t)mode_loc_csf; use_tags[mode] = 2; for(idx_t rm=mode+1; rm<nmodes; ++rm) { use_csfs[rm] = use_csfs[mode]; use_tags[rm] = 2; } } } } // Loop g splatt_free(csf_modes); } splatt_free(grp_modes); } // end function void mttkrp_csf_adaptive( splatt_csf const * const tensors, 
rcsf_seq_adaptive const * const rs_seq, idx_t const n_csf, matrix_t ** mats, idx_t const mode, thd_info * const thds, group_properties * const grp_prop, int const n_grp, idx_t const use_csf, idx_t const use_tag, double const * const opts) { /* clear output matrix */ matrix_t * const M = mats[MAX_NMODES]; M->I = tensors[0].dims[mode]; memset(M->vals, 0, M->I * M->J * sizeof(val_t)); omp_set_num_threads(opts[SPLATT_OPTION_NTHREADS]); idx_t nmodes = tensors[0].nmodes; /* find out which level in the tree this is */ idx_t outdepth = MAX_NMODES; switch(use_tag) { case 0: if(grp_prop[use_csf].n_imten != 0) p_root_decide_genreuse_adaptive(tensors + use_csf, mats, rs_seq + use_csf, mode, thds, opts); else p_root_decide(tensors + use_csf, mats, mode, thds, opts); break; case 1: // Reuse intermediate rcsf. p_intl_decide_reuse_adaptive(tensors + use_csf, mats, rs_seq + use_csf, mode, thds, opts); break; case 2: outdepth = csf_mode_depth(mode, (tensors + use_csf)->dim_perm, nmodes); assert(outdepth > 0); if(outdepth == nmodes - 1) { p_leaf_decide(tensors + use_csf, mats, mode, thds, opts); } else { p_intl_decide(tensors + use_csf, mats, mode, thds, opts); } break; default: printf("Wrong use_tag.\n"); return; } } /****************************************************************************** * DEPRECATED FUNCTIONS *****************************************************************************/ /****************************************************************************** * SPLATT MTTKRP *****************************************************************************/ void mttkrp_splatt( ftensor_t const * const ft, matrix_t ** mats, idx_t const mode, thd_info * const thds, idx_t const nthreads) { if(ft->tiled == SPLATT_SYNCTILE) { mttkrp_splatt_sync_tiled(ft, mats, mode, thds, nthreads); return; } if(ft->tiled == SPLATT_COOPTILE) { mttkrp_splatt_coop_tiled(ft, mats, mode, thds, nthreads); return; } matrix_t * const M = mats[MAX_NMODES]; matrix_t const * const A = 
mats[ft->dim_perm[1]]; matrix_t const * const B = mats[ft->dim_perm[2]]; idx_t const nslices = ft->dims[mode]; idx_t const rank = M->J; val_t * const mvals = M->vals; memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t)); val_t const * const avals = A->vals; val_t const * const bvals = B->vals; idx_t const * const restrict sptr = ft->sptr; idx_t const * const restrict fptr = ft->fptr; idx_t const * const restrict fids = ft->fids; idx_t const * const restrict inds = ft->inds; val_t const * const restrict vals = ft->vals; sp_timer_t mul_timer, hr_timer; double mul_sec, hr_sec; timer_reset (&mul_timer); timer_reset (&hr_timer); #pragma omp parallel { int const tid = omp_get_thread_num(); val_t * const restrict accumF = (val_t *) thds[tid].scratch[0]; timer_start(&thds[tid].ttime); #pragma omp for schedule(dynamic, 16) nowait for(idx_t s=0; s < nslices; ++s) { val_t * const restrict mv = mvals + (s * rank); /* foreach fiber in slice */ for(idx_t f=sptr[s]; f < sptr[s+1]; ++f) { timer_start(&mul_timer); /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] += v * bv[r]; } } timer_stop(&mul_timer); timer_start(&hr_timer); /* scale inner products by row of A and update to M */ val_t const * const restrict av = avals + (fids[f] * rank); for(idx_t r=0; r < rank; ++r) { mv[r] += accumF[r] * av[r]; } timer_stop(&hr_timer); } } mul_sec = mul_timer.seconds; hr_sec = hr_timer.seconds; printf("mul_sec: %f\n", mul_sec); printf("hr_sec: %f\n", hr_sec); timer_stop(&thds[tid].ttime); } /* end parallel region */ } void mttkrp_splatt_sync_tiled( ftensor_t const * const ft, matrix_t ** 
mats, idx_t const mode, thd_info * const thds, idx_t const nthreads) { matrix_t * const M = mats[MAX_NMODES]; matrix_t const * const A = mats[ft->dim_perm[1]]; matrix_t const * const B = mats[ft->dim_perm[2]]; idx_t const nslabs = ft->nslabs; idx_t const rank = M->J; val_t * const mvals = M->vals; memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t)); val_t const * const avals = A->vals; val_t const * const bvals = B->vals; idx_t const * const restrict slabptr = ft->slabptr; idx_t const * const restrict sids = ft->sids; idx_t const * const restrict fptr = ft->fptr; idx_t const * const restrict fids = ft->fids; idx_t const * const restrict inds = ft->inds; val_t const * const restrict vals = ft->vals; #pragma omp parallel { int const tid = omp_get_thread_num(); val_t * const restrict accumF = (val_t *) thds[tid].scratch[0]; timer_start(&thds[tid].ttime); #pragma omp for schedule(dynamic, 1) nowait for(idx_t s=0; s < nslabs; ++s) { /* foreach fiber in slice */ for(idx_t f=slabptr[s]; f < slabptr[s+1]; ++f) { /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] += v * bv[r]; } } /* scale inner products by row of A and update to M */ val_t * const restrict mv = mvals + (sids[f] * rank); val_t const * const restrict av = avals + (fids[f] * rank); for(idx_t r=0; r < rank; ++r) { mv[r] += accumF[r] * av[r]; } } } timer_stop(&thds[tid].ttime); } /* end parallel region */ } void mttkrp_splatt_coop_tiled( ftensor_t const * const ft, matrix_t ** mats, idx_t const mode, thd_info * const thds, idx_t const nthreads) { matrix_t * const M = mats[MAX_NMODES]; matrix_t const * const 
A = mats[ft->dim_perm[1]]; matrix_t const * const B = mats[ft->dim_perm[2]]; idx_t const nslabs = ft->nslabs; idx_t const rank = M->J; val_t * const mvals = M->vals; memset(mvals, 0, ft->dims[mode] * rank * sizeof(val_t)); val_t const * const avals = A->vals; val_t const * const bvals = B->vals; idx_t const * const restrict slabptr = ft->slabptr; idx_t const * const restrict sptr = ft->sptr; idx_t const * const restrict sids = ft->sids; idx_t const * const restrict fptr = ft->fptr; idx_t const * const restrict fids = ft->fids; idx_t const * const restrict inds = ft->inds; val_t const * const restrict vals = ft->vals; #pragma omp parallel { int const tid = omp_get_thread_num(); val_t * const restrict accumF = (val_t *) thds[tid].scratch[0]; val_t * const localm = (val_t *) thds[tid].scratch[1]; timer_start(&thds[tid].ttime); /* foreach slab */ for(idx_t s=0; s < nslabs; ++s) { /* foreach fiber in slab */ #pragma omp for schedule(dynamic, 8) for(idx_t sl=slabptr[s]; sl < slabptr[s+1]; ++sl) { idx_t const slice = sids[sl]; for(idx_t f=sptr[sl]; f < sptr[sl+1]; ++f) { /* first entry of the fiber is used to initialize accumF */ idx_t const jjfirst = fptr[f]; val_t const vfirst = vals[jjfirst]; val_t const * const restrict bv = bvals + (inds[jjfirst] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] = vfirst * bv[r]; } /* foreach nnz in fiber */ for(idx_t jj=fptr[f]+1; jj < fptr[f+1]; ++jj) { val_t const v = vals[jj]; val_t const * const restrict bv = bvals + (inds[jj] * rank); for(idx_t r=0; r < rank; ++r) { accumF[r] += v * bv[r]; } } /* scale inner products by row of A and update thread-local M */ val_t * const restrict mv = localm + ((slice % TILE_SIZES[0]) * rank); val_t const * const restrict av = avals + (fids[f] * rank); for(idx_t r=0; r < rank; ++r) { mv[r] += accumF[r] * av[r]; } } } idx_t const start = s * TILE_SIZES[0]; idx_t const stop = SS_MIN((s+1) * TILE_SIZES[0], ft->dims[mode]); #pragma omp for schedule(static) for(idx_t i=start; i < stop; ++i) { /* 
map i back to global slice id */
        idx_t const localrow = i % TILE_SIZES[0];
        /* reduce every thread's private partial rows into the global output,
         * zeroing the scratch so it is clean for the next slab */
        for(idx_t t=0; t < nthreads; ++t) {
          val_t * const threadm = (val_t *) thds[t].scratch[1];
          for(idx_t r=0; r < rank; ++r) {
            mvals[r + (i*rank)] += threadm[r + (localrow*rank)];
            threadm[r + (localrow*rank)] = 0.;
          }
        }
      }
    } /* end foreach slab */

    timer_stop(&thds[tid].ttime);
  } /* end omp parallel */
}


/******************************************************************************
 * GIGA MTTKRP
 *****************************************************************************/

/* MTTKRP on a CSR "unfolded" view of a 3-mode tensor: the column index of
 * each nonzero encodes the two non-target modes as colind = a * B->I + b.
 * 'scratch' must hold at least one val_t per nonzero.
 * NOTE(review): the A = mats[1]/mats[0], B = mats[1]/mats[2] selection
 * presumes exactly 3 modes — confirm callers never pass higher-order data. */
void mttkrp_giga(
  spmatrix_t const * const spmat,
  matrix_t ** mats,
  idx_t const mode,
  val_t * const scratch)
{
  matrix_t * const M = mats[MAX_NMODES];
  matrix_t const * const A = mode == 0 ? mats[1] : mats[0];
  matrix_t const * const B = mode == 2 ? mats[1] : mats[2];

  idx_t const I = spmat->I;
  idx_t const rank = M->J;

  idx_t const * const restrict rowptr = spmat->rowptr;
  idx_t const * const restrict colind = spmat->colind;
  val_t const * const restrict vals = spmat->vals;

  #pragma omp parallel
  {
    /* one factor column at a time; M is stored column-major here (r * I) */
    for(idx_t r=0; r < rank; ++r) {
      val_t * const restrict mv = M->vals + (r * I);
      val_t const * const restrict av = A->vals + (r * A->I);
      val_t const * const restrict bv = B->vals + (r * B->I);

      /* Joined Hadamard products of X, C, and B */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          /* decode the fused column index into the two free-mode indices */
          idx_t const a = colind[y] / B->I;
          idx_t const b = colind[y] % B->I;
          scratch[y] = vals[y] * av[a] * bv[b];
        }
      }

      /* now accumulate rows into column of M1 */
      #pragma omp for schedule(dynamic, 16)
      for(idx_t i=0; i < I; ++i) {
        val_t sum = 0;
        for(idx_t y=rowptr[i]; y < rowptr[i+1]; ++y) {
          sum += scratch[y];
        }
        mv[i] = sum;
      }
    }
  }
}


/******************************************************************************
 * TTBOX MTTKRP
 *****************************************************************************/

/* extend ttbox to high-order tensors.
*/ void mttkrp_ttbox( sptensor_t const * const tt, matrix_t ** mats, idx_t const mode, val_t * const scratch) { idx_t const nnz = tt->nnz; idx_t ** inds = tt->ind; val_t const * const restrict vals = tt->vals; matrix_t * const M = mats[MAX_NMODES]; idx_t const I = tt->dims[mode]; idx_t const rank = M->J; memset(M->vals, 0, I * rank * sizeof(val_t)); if (tt->type == SPLATT_3MODE) { /*** For 3rd-order tensor **/ assert (tt->nmodes == 3); //mats is in reserve order. matrix_t const * const A = mode == 0 ? mats[1] : mats[0]; matrix_t const * const B = mode == 2 ? mats[1] : mats[2]; assert (rank == B->J); idx_t const * const indM = tt->ind[mode]; idx_t const * const indA = mode == 0 ? tt->ind[1] : tt->ind[0]; idx_t const * const indB = mode == 2 ? tt->ind[1] : tt->ind[2]; for(idx_t r=0; r < rank; ++r) { val_t * const restrict mv = M->vals + (r * I); val_t const * const restrict av = A->vals + (r * A->I); val_t const * const restrict bv = B->vals + (r * B->I); /* stretch out columns of A and B */ #pragma omp parallel for for(idx_t x=0; x < nnz; ++x) { scratch[x] = vals[x] * av[indA[x]] * bv[indB[x]]; } /* now accumulate into m1 */ for(idx_t x=0; x < nnz; ++x) { mv[indM[x]] += scratch[x]; } } } else if (tt->type == SPLATT_NMODE) { assert (tt->nmodes > 3); val_t * scratch_2 = (val_t*)splatt_malloc(nnz * sizeof(val_t)); memset(scratch_2, 0, nnz * sizeof(val_t)); idx_t nmodes = tt->nmodes; idx_t nmats = nmodes - 1; idx_t * const mats_order = (idx_t *) splatt_malloc (nmats * sizeof(*mats_order)); idx_t j = 0; for (int i=nmodes-1; i>=0; --i) { if (i != mode) { mats_order[j] = i; ++ j; } } assert (j == nmats-1); idx_t const * const indM = tt->ind[mode]; for(idx_t r=0; r < rank; ++r) { for (idx_t ii=0; ii<nnz; ++ii) scratch[ii] = tt->vals[ii]; val_t * const restrict mv = M->vals + (r * I); for (idx_t i=0; i<nmats; i++) { matrix_t *tmp_mat = mats[mats_order[i]]; assert (rank == tmp_mat->J); idx_t *tmp_inds = tt->ind[mats_order[i]]; val_t const * const av = tmp_mat->vals + (r * 
tmp_mat->I); #pragma omp parallel for for(idx_t x=0; x < nnz; ++x) { scratch_2[x] = scratch[x] * av[tmp_inds[x]]; } for (idx_t ii=0; ii<nnz; ++ii) scratch[ii] = scratch_2[ii]; } for(idx_t x=0; x < nnz; ++x) { mv[indM[x]] += scratch[x]; } } free(mats_order); free(scratch_2); } return; } void mttkrp_stream( sptensor_t const * const tt, matrix_t ** mats, idx_t const mode) { matrix_t * const M = mats[MAX_NMODES]; idx_t const I = tt->dims[mode]; idx_t const nfactors = M->J; val_t * const outmat = M->vals; memset(outmat, 0, I * nfactors * sizeof(val_t)); idx_t const nmodes = tt->nmodes; val_t * accum = (val_t *) splatt_malloc(nfactors * sizeof(val_t)); val_t * mvals[MAX_NMODES]; for(idx_t m=0; m < nmodes; ++m) { mvals[m] = mats[m]->vals; } val_t const * const restrict vals = tt->vals; /* stream through nnz */ for(idx_t n=0; n < tt->nnz; ++n) { /* initialize with value */ for(idx_t f=0; f < nfactors; ++f) { accum[f] = vals[n]; } for(idx_t m=0; m < nmodes; ++m) { if(m == mode) { continue; } val_t const * const restrict inrow = mvals[m] + (tt->ind[m][n] * nfactors); for(idx_t f=0; f < nfactors; ++f) { accum[f] *= inrow[f]; } } /* write to output */ val_t * const restrict outrow = outmat + (tt->ind[mode][n] * nfactors); for(idx_t f=0; f < nfactors; ++f) { outrow[f] += accum[f]; } } free(accum); }
tree.c
#include "tree.h"
#include "io.h"
#include "sort.h"
#include <assert.h> /* assert() was previously reached only transitively */
#include <omp.h>
#include <stdio.h>
#include <stdlib.h> /* malloc/free were previously reached only transitively */
#include <string.h>

/**
 * @brief Instantiate a new node of the tree.
 *
 * @param key The key of the node
 * @param value The value of the node
 * @param parent The parent of the node in the tree
 * @return Pointer to the node created (allocation failure aborts via assert)
 */
TreeNode *tree_node_new(int key, int value, int parent) {
  TreeNode *node = (TreeNode *)malloc(sizeof(TreeNode));
  assert(node != NULL);
  node->key = key;
  node->value = value;
  node->parent = parent;
  node->adj = hashmap_new();
  assert(node->adj != NULL);
  return node;
}

/**
 * @brief Free a tree node and its adjacency map.
 *
 * @param node Pointer to the node to free (NULL is a no-op)
 */
void tree_node_free(TreeNode *node) {
  if (node != NULL) {
    hashmap_free(node->adj);
    free(node);
  }
}

/**
 * @brief Instantiate a new tree containing only the sentinel root node.
 *
 * @return The new tree
 */
Tree tree_new() {
  // the root is a sentinel: TREE_NODE_NULL key, parent index pointing to itself
  TreeNode *node = tree_node_new(TREE_NODE_NULL, -1, 0);
  Tree tree = NULL;
  cvector_push_back(tree, node);
  return tree;
}

/**
 * @brief Free the tree and all of its nodes.
 *
 * @param tree Pointer to the tree to free; *tree is set to NULL
 */
void tree_free(Tree *tree) {
  if (*tree != NULL) {
    int n_nodes = cvector_size((*tree));
    int i;
    for (i = 0; i < n_nodes; i++) {
      tree_node_free((*tree)[i]);
    }
    cvector_free((*tree));
    *tree = NULL;
  }
}

/**
 * @brief Add a node to the tree and register it in its parent's adjacency map.
 *
 * @param tree Pointer to the tree
 * @param node Pointer to the node to add (ownership moves to the tree)
 * @return The id of the node in the tree
 */
int tree_add_node(Tree *tree, TreeNode *node) {
  cvector_push_back((*tree), node);
  assert(*tree != NULL); // the malloc has not failed
  int new_id = cvector_size((*tree)) - 1;
  TreeNode *parent = (*tree)[node->parent];
  hashmap_put(parent->adj, &(node->key), sizeof(int), new_id);
  assert(new_id != node->parent);
  return new_id;
}

/**
 * @brief Add the subtree rooted in the ns(th) node of the tree
 * source as a child of the nd(th) node of the tree dest. The
 * source tree is modified, as the nodes are moved to the
 * destination tree.
 *
 * @param dest Pointer to the destination tree
 * @param source Pointer to the source tree
 * @param nd Id of the node in the destination tree
 * @param ns Id of the node in the source tree
 */
void tree_add_subtree(Tree *dest, Tree source, int nd, int ns) {
  // add node ns: detach its adjacency, re-parent it under nd in dest
  cvector_vector_type(hashmap_element) neighbours = NULL;
  hashmap_get_elements(source[ns]->adj, &neighbours);
  hashmap_free(source[ns]->adj);
  source[ns]->adj = hashmap_new();
  source[ns]->parent = nd;
  int new_pos = tree_add_node(dest, source[ns]);
  int num_adj_s = cvector_size(neighbours);
  // recursively add children
  int i;
  for (i = 0; i < num_adj_s; i++) {
    assert(neighbours[i].value != ns);
    tree_add_subtree(dest, source, new_pos, neighbours[i].value);
  }
  // the node now belongs to dest; null the slot so tree_free won't double-free
  source[ns] = NULL;
  cvector_free(neighbours);
}

/**
 * @brief Merge the subtree of dest rooted in node with id nd
 * with the subtree of source rooted in ns and store the result in dest.
 * Also the source tree is modified.
 *
 * @param dest The destination tree
 * @param source The source tree
 * @param nd Id of the node in the destination tree
 * @param ns Id of the node in the source tree
 */
void tree_merge_dfs(Tree *dest, Tree source, int nd, int ns) {
  int i;
  cvector_vector_type(hashmap_element) neighbours = NULL;
  hashmap_get_elements(source[ns]->adj, &neighbours);
  int num_adj_s = cvector_size(neighbours);
  // foreach neighbour of node ns in source
  for (i = 0; i < num_adj_s; i++) {
    int source_pos = neighbours[i].value;
    assert(ns != neighbours[i].value);
    // if a node with the same key(item) is already present in the
    // children of nd, just increment the counter
    int dest_pos;
    if (hashmap_get((*dest)[nd]->adj, neighbours[i].key, sizeof(int),
                    &dest_pos) == MAP_OK) {
      (*dest)[dest_pos]->value += source[source_pos]->value;
      tree_merge_dfs(dest, source, dest_pos, source_pos);
    } else {
      // otherwise add the child and the subtree rooted in it to
      // the node nd in dest
      tree_add_subtree(dest, source, nd, source_pos);
    }
  }
  cvector_free(neighbours);
}

/**
 * @brief Merge the trees dest and source and store the result in dest.
 * The source tree is modified. It is a wrapper for @see tree_merge_dfs()
 *
 * @param dest The destination tree
 * @param source The source tree
 */
void tree_merge(Tree *dest, Tree source) { tree_merge_dfs(dest, source, 0, 0); }

/**
 * @brief Inserts into the vector nodes the nodes to send
 *
 * @param tree The trees from which to get the nodes
 * @param nodes The vector in which the nodes are put
 */
void tree_get_nodes(Tree tree, cvector_vector_type(TreeNodeToSend) * nodes) {
  int num_nodes = cvector_size(tree);
  for (int i = 0; i < num_nodes; i++) {
    TreeNodeToSend node;
    node.key = tree[i]->key;
    node.value = tree[i]->value;
    node.parent = tree[i]->parent;
    cvector_push_back((*nodes), node);
  }
}

/**
 * @brief Print the tree
 *
 * @param tree The tree to print
 */
void tree_print(Tree tree) {
  int n_nodes = cvector_size(tree);
  for (int i = 0; i < n_nodes; i++) {
    printf("Node (%d: %d)\n", tree[i]->key, tree[i]->value);
    hashmap_print(tree[i]->adj);
  }
}

/**
 * @brief Build a tree given a transaction
 *
 * @param rank The rank of the process
 * @param world_size The number of processes in the world
 * @param transaction The transaction (consumed and freed by this function)
 * @param index_map The map from item to the corresponding id
 * @param items_count The array of hashmap elements having the item string as a
 * key and the support count as a value
 * @param num_items The number of items in the sorted_indices array
 * @param sorted_indices The array of the sorted indices of the items
 * @return The built tree (a single path, one node per frequent item)
 */
Tree tree_build_from_transaction(int rank, int world_size,
                                 Transaction *transaction, IndexMap index_map,
                                 hashmap_element *items_count, int num_items,
                                 int *sorted_indices) {
  int n_items = cvector_size((*transaction));
  cvector_vector_type(hashmap_element) elements = NULL;
  for (int i = 0; i < n_items; i++) {
    int item_size = cvector_size((*transaction)[i]);
    hashmap_element element;
    // consider only items with support >= min_support (in index map)
    if (hashmap_get(index_map, (*transaction)[i], item_size,
                    &(element.value)) == MAP_OK) {
      element.key_length = item_size;
      memcpy(element.key, (*transaction)[i], item_size);
      cvector_push_back(elements, element);
    }
  }
  transaction_free(transaction);
  n_items = cvector_size(elements);
  int *transaction_sorted_indices = (int *)malloc(n_items * sizeof(int));
  // malloc(0) may legitimately return NULL, hence the n_items check
  assert(transaction_sorted_indices != NULL || n_items == 0);
  sort(elements, n_items, transaction_sorted_indices, 0, n_items - 1, 1);
  Tree tree = tree_new();
  for (int i = 0; i < n_items; i++) {
    assert(transaction_sorted_indices[i] >= 0);
    assert(transaction_sorted_indices[i] < n_items);
    Item item = elements[transaction_sorted_indices[i]].key;
    int item_size = elements[transaction_sorted_indices[i]].key_length;
    int pos;
    // BUGFIX: the lookup used to live *inside* assert(), so compiling with
    // NDEBUG skipped it and left 'pos' uninitialized
    int rc = hashmap_get(index_map, item, item_size, &pos);
    assert(rc == MAP_OK);
    (void)rc;
    assert(pos >= 0);
    assert(pos < num_items);
    assert(sorted_indices[pos] >= 0);
    assert(sorted_indices[pos] < num_items);
    // parent = i: the previous node in the path (root is id 0, this gets i+1)
    TreeNode *node = tree_node_new(sorted_indices[pos], 1, i);
    assert(node != NULL);
    // BUGFIX: tree_add_node used to be called *inside* assert(), so compiling
    // with NDEBUG dropped the insertion entirely
    int new_id = tree_add_node(&tree, node);
    assert(new_id == i + 1);
    (void)new_id;
  }
  cvector_free(elements);
  free(transaction_sorted_indices);
  return tree;
}

/**
 * @brief Build a tree given a list of transactions
 *
 * First, we build the trees for the single transactions.
 * Then, we merge them in a binary-tree-like fashion.
 *
 * @param rank The rank of the process
 * @param world_size The number of processes in the world
 * @param transactions
 * @param index_map The map from item to the corresponding id
 * @param items_count The array of hashmap elements having the item string as a
 * key and the support count as a value
 * @param num_items The number of items in the sorted_indices array
 * @param sorted_indices The array of the sorted indices of the items
 * @param num_threads The number of threads requested to perform the building
 * @return The built tree
 */
Tree tree_build_from_transactions(int rank, int world_size,
                                  TransactionsList transactions,
                                  IndexMap index_map,
                                  hashmap_element *items_count, int num_items,
                                  int *sorted_indices, int num_threads) {
  int n_transactions = cvector_size(transactions);
  // BUGFIX: with no transactions, trees[0] was read uninitialized;
  // return an empty (root-only) tree instead
  if (n_transactions == 0) {
    return tree_new();
  }
  Tree *trees = (Tree *)malloc(n_transactions * sizeof(Tree));
  assert(trees != NULL);
  int i, pow;
  // every thread walks the outer 'pow' loop; the inner worksharing loop (and
  // its implicit barrier) keeps the merge levels in lockstep
#pragma omp parallel default(none)                                             \
    shared(n_transactions, trees, rank, world_size, transactions, index_map,   \
           items_count, num_items, sorted_indices) private(pow, i)             \
    num_threads(num_threads)
  for (pow = 1; pow < 2 * n_transactions; pow *= 2) {
    int start = pow == 1 ? 0 : pow / 2;
#pragma omp for schedule(runtime)
    for (i = start; i < n_transactions; i += pow) {
      if (pow > 1) {
        // at levels > 1, merge two subtrees
        tree_merge(&trees[i - pow / 2], trees[i]);
        tree_free(&(trees[i]));
      } else {
        // at first level, build the transaction trees
        trees[i] = tree_build_from_transaction(
            rank, world_size, &(transactions[i]), index_map, items_count,
            num_items, sorted_indices);
      }
    }
  }
  Tree res = trees[0];
  free(trees);
  return res;
}
is_initial_device.c
// REQUIRES: powerpc-registered-target // RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-unknown-unknown \ // RUN: -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -fopenmp -x ir -triple powerpc64le-unknown-unknown -emit-llvm \ // RUN: %t-ppc-host.bc -o - | FileCheck %s -check-prefixes HOST,OUTLINED // RUN: %clang_cc1 -verify -fopenmp -x c -triple powerpc64le-unknown-unknown -emit-llvm -fopenmp-is-device \ // RUN: %s -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s -check-prefixes DEVICE,OUTLINED // RUN: %clang_cc1 -verify -fopenmp-simd -x c -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-unknown-unknown -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify -fopenmp-simd -x ir -triple powerpc64le-unknown-unknown -emit-llvm %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -verify -fopenmp-simd -x c -triple powerpc64le-unknown-unknown -emit-llvm -fopenmp-is-device %s -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics int check() { int host = omp_is_initial_device(); int device; #pragma omp target map(tofrom: device) { device = omp_is_initial_device(); } return host + device; } // The host should get a value of 1: // HOST: define{{.*}} @check() // HOST: [[HOST:%.*]] = alloca i32 // HOST: store i32 1, i32* [[HOST]] // OUTLINED: define{{.*}} @{{.*}}omp_offloading{{.*}}(i32*{{.*}} [[DEVICE_ARGUMENT:%.*]]) // OUTLINED: [[DEVICE_ADDR_STORAGE:%.*]] = alloca i32* // OUTLINED: store i32* [[DEVICE_ARGUMENT]], i32** [[DEVICE_ADDR_STORAGE]] // OUTLINED: [[DEVICE_ADDR:%.*]] = load i32*, i32** [[DEVICE_ADDR_STORAGE]] // The outlined function that is called as fallback also runs on the host: // HOST: store i32 1, i32* [[DEVICE_ADDR]] // The device should get a value of 0: // DEVICE: store i32 0, i32* [[DEVICE_ADDR]]
GB_unop__lgamma_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__lgamma_fp32_fp32
// op(A') function:  GB_unop_tran__lgamma_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = lgammaf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = lgammaf (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;        \
    Cx [pC] = lgammaf (z) ;        \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LGAMMA || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__lgamma_fp32_fp32
(
    float *Cx,       // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // element-wise, so aliasing Cx == Ax is safe
    // NOTE(review): POSIX lgammaf() also writes the global 'signgam'; under
    // this parallel loop that would be a benign data race on the global only
    // (the return value is unaffected) — confirm for the target libm.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        float z = aij ;
        Cx [p] = lgammaf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__lgamma_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by the shared template below,
    // driven by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ten_tusscher_2004_epi_S3_3.c
//Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_3.h"

// Report this model's initial membrane voltage and its number of state
// variables (ODEs) to the framework, depending on which flags are requested.
GET_CELL_MODEL_DATA(init_cell_model_data) {

    assert(cell_model);

    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

//TODO: this should be called only once for the whole mesh, like in the GPU code
// Initialize the 17-entry state vector sv for one cell with pre-computed
// steady-state values (the default paper initial conditions are kept below,
// commented out, for reference).
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    // Default initial conditions
    /*
    sv[0] = INITIAL_V; // V; millivolt
    sv[1] = 0.f;   //M
    sv[2] = 0.75;  //H
    sv[3] = 0.75f; //J
    sv[4] = 0.f;   //Xr1
    sv[5] = 1.f;   //Xr2
    sv[6] = 0.f;   //Xs
    sv[7] = 1.f;   //S
    sv[8] = 0.f;   //R
    sv[9] = 0.f;   //D
    sv[10] = 1.f;  //F
    sv[11] = 1.f;  //FCa
    sv[12] = 1.f;  //G
    sv[13] = 0.0002; //Cai
    sv[14] = 0.2f;   //CaSR
    sv[15] = 11.6f;  //Nai
    sv[16] = 138.3f; //Ki
    */
    // Elnaz's steady-state initial conditions
    real sv_sst[]={-86.6743585456438,0.00126116515238777,0.782285143101146,0.781885737321280,0.000172267497323657,0.486193660951379,0.00291820808108493,0.999998382455018,1.89973078307127e-08,1.86451321167615e-05,0.999780198191440,1.00782702931804,0.999999754763967,2.76599036686923e-05,0.357538249293263,10.7085717792583,139.021384569998};
    for (uint32_t i = 0; i < NEQ; i++)
        sv[i] = sv_sst[i];
}

// Advance every requested cell by num_steps explicit time steps of size dt.
// cells_to_solve, when non-NULL, maps the loop index to the cell's slot in sv.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {

    uint32_t sv_id;

    int i;

    // Cells are independent; sv_id must be private per thread.
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {

        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = i;

        for (int j = 0; j < num_steps; ++j) {
            // NOTE(review): the stimulus is indexed by the loop index i, not
            // by sv_id — presumably stim_currents is packed per *solved* cell;
            // confirm against the caller.
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

// One explicit time step for a single cell: snapshot the state, evaluate the
// model, and write the updated state back.
void solve_model_ode_cpu(real dt, real *sv, real stim_current)  {

    assert(sv);

    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    // rDY holds the *updated* state values (see RHS_cpu), not time
    // derivatives, so it is assigned directly rather than integrated here.
    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

// Evaluate one time step of the ten Tusscher 2004 (epicardial) ventricular
// cell model.
//   sv           current state: [V, m, h, j, Xr1, Xr2, Xs, s, r, d, f, fCa,
//                g, Cai, CaSR, Nai, Ki]
//   rDY_         output: the UPDATED state after a step of size dt.  Gates
//                use the Rush-Larsen exponential update; V uses forward
//                Euler; Cai/CaSR use the analytic buffering solution.
//   stim_current external stimulus, added into the total current sItot.
//   dt           time step (same units as the rate constants).
// NOTE(review): R_INF/S_INF/TAU_R/TAU_S are only assigned inside
// #ifdef EPI / ENDO / MCELL blocks — if none of those macros is defined
// (expected: EPI, per the file name) they are used uninitialized; confirm the
// header defines EPI.
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {

    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];

    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;

    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;

    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;

    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF   =(R*T)/F;

    //Cellular capacitance
    real CAPACITANCE=0.185;

    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///    real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///    real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    // real Gto=0.073;
    //#endif
    //#ifdef MCELL
    // real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;

    // Fitted conductances/rates; these override the default values above.
    real parameters []={14.4941061664816,0.000306940351318330,0.000126486160649835,0.000251593758331556,0.231852653636147,0.170492615868249,0.109036079095606,4.44796487754522,0.0111149661882113,1.23956736157302,1099.91017026794,0.000314927815763443,0.381236416535235,0.0193513922111542,0.00539385037460332,9.81890868796030e-06};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];

    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;

    real dNai;
    real dKi;
    real dCai;
    real dCaSR;

    real A;
    //    real BufferFactorc;
    //    real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;

    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;

    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //    real BufcKbufc=Bufc*Kbufc;
    //    real Kbufcsquare=Kbufc*Kbufc;
    //    real Kbufc2=2*Kbufc;
    //    real BufsrKbufsr=Bufsr*Kbufsr;
    //    const real Kbufsrsquare=Kbufsr*Kbufsr;
    //    const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);

    real sItot;

    //Needed to compute currents
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));

    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);

    //Determine total current (includes the external stimulus)
    (sItot) = IKr    +
              IKs   +
              IK1   +
              Ito   +
              INa   +
              IbNa  +
              ICaL  +
              IbCa  +
              INaK  +
              INaCa +
              IpCa  +
              IpK   +
              stim_current;

    //update concentrations
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    // SR calcium with calsequestrin buffering, solved analytically (quadratic)
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    // cytosolic calcium with buffering, solved analytically (quadratic)
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;

    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;

    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;

    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;

    // Cell-type specific transient-outward gates (see NOTE above the function)
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif

    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));

    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only relax (not increase) while depolarized
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;

    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
kvstore_dist_server.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file mxnet_node.h * \brief implement mxnet nodes */ #ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_ #include <mxnet/c_api.h> #include <mxnet/kvstore.h> #include <ps/ps.h> #include <queue> #include <string> #include <mutex> #include <condition_variable> #include <memory> #include <functional> #include <future> #include <vector> #include "../profiler/profiler.h" #include "../operator/tensor/elemwise_binary_op-inl.h" #include "../operator/tensor/init_op.h" namespace mxnet { namespace kvstore { // maintain same order in frontend. enum class CommandType { kController, kSetMultiPrecision, kStopServer, kSyncMode, kSetGradientCompression, kSetProfilerParams }; enum class RequestType { kDefaultPushPull, kRowSparsePushPull, kCompressedPushPull }; struct DataHandleType { RequestType requestType; int dtype; }; /*! * Uses Cantor pairing function to generate a unique number given two numbers. * This number can also be inverted to find the unique pair whose Cantor value is this number. 
* Ref: https://en.wikipedia.org/wiki/Pairing_function#Cantor_pairing_function * \param requestType RequestType * \param dtype integer * \return Cantor value of arguments */ static int GetCommandType(RequestType requestType, int d) { int m = static_cast<int>(requestType); return (((m + d) * (m + d + 1)) / 2) + d; } /*! * Unpairs Cantor value and finds the two integers used to pair. * Then returns DataHandleType object with those numbers. * \param cmd DataHandleCommand generated by GetCommandType function * \return DataHandleType */ static DataHandleType DepairDataHandleType(int cmd) { int w = std::floor((std::sqrt(8 * cmd + 1) - 1)/2); int t = ((w * w) + w) / 2; int y = cmd - t; int x = w - y; CHECK_GE(x, 0); CHECK_GE(y, 0); DataHandleType type; type.requestType = static_cast<RequestType>(x); type.dtype = y; return type; } /** * \brief executor runs a function using the thread called \ref Start */ class Executor { public: /** * \brief start the executor */ void Start() { std::unique_lock<std::mutex> lk(mu_); while (true) { cond_.wait(lk, [this]{return !queue_.empty();}); Block blk = std::move(queue_.front()); queue_.pop(); lk.unlock(); if (blk.f) { blk.f(); blk.p->set_value(); } else { blk.p->set_value(); break; } lk.lock(); } } /** * \brief function */ typedef std::function<void()> Func; /** * \brief let the thread called \ref Start to exec a function. 
threadsafe */ void Exec(const Func& func) { Block blk(func); auto fut = blk.p->get_future(); { std::lock_guard<std::mutex> lk(mu_); queue_.push(std::move(blk)); cond_.notify_one(); } fut.wait(); } /** * \brief stop the thread, threadsafe */ void Stop() { Exec(Func()); } private: struct Block { explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { } Func f; std::shared_ptr<std::promise<void>> p; }; std::queue<Block> queue_; std::mutex mu_; std::condition_variable cond_; }; class KVStoreDistServer { public: KVStoreDistServer() { using namespace std::placeholders; ps_server_ = new ps::KVServer<char>(0); static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle( std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2)); ps_server_->set_request_handle( std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3)); sync_mode_ = false; gradient_compression_ = std::make_shared<GradientCompression>(); log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false); } ~KVStoreDistServer() { profiler::Profiler::Get()->SetState(profiler::Profiler::ProfilerState(0)); delete ps_server_; } void set_controller(const KVStore::Controller& controller) { CHECK(controller); controller_ = controller; } void set_updater(const KVStore::Updater& updater) { CHECK(updater); updater_ = updater; } /** * \brief blocked until received the command \a kSyncMode */ void Run() { exec_.Start(); } private: struct UpdateBuf { std::vector<ps::KVMeta> request; NDArray merged; // temp_array is used to cast received values as float32 for computation if required NDArray temp_array; }; void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) { CommandType recved_type = static_cast<CommandType>(recved.head); switch (recved_type) { case CommandType::kStopServer: exec_.Stop(); break; case CommandType::kSyncMode: sync_mode_ = true; break; case CommandType::kSetGradientCompression: gradient_compression_->DecodeParams(recved.body); break; case 
CommandType::kSetProfilerParams: // last char is the type of profiler command ProcessServerProfilerCommands(static_cast<KVStoreServerProfilerCommand> (recved.body.back() - '0'), recved.body); break; case CommandType::kSetMultiPrecision: // uses value 1 for message id from frontend if (!multi_precision_) { multi_precision_ = true; CreateMultiPrecisionCopies(); } break; case CommandType::kController: // this uses value 0 for message id from frontend // let the main thread to execute ctrl, which is necessary for python exec_.Exec([this, recved]() { CHECK(controller_); controller_(recved.head, recved.body); }); break; } app->Response(recved); } /* * For keys already initialized, if necessary create stored_realt. * This will only be used if by some wrong usage of kvstore, * some keys are initialized before optimizer is set. */ void CreateMultiPrecisionCopies() { for (auto const &stored_entry : store_) { const int key = stored_entry.first; const NDArray &stored = stored_entry.second; if (stored.dtype() != mshadow::kFloat32) { auto &stored_realt = store_realt_[key]; if (stored.storage_type() == kRowSparseStorage) { stored_realt = NDArray(kRowSparseStorage, stored.shape(), stored.ctx(), true, mshadow::kFloat32); } else { stored_realt = NDArray(stored.shape(), stored.ctx(), false, mshadow::kFloat32); } auto &update = update_buf_[key]; if (!update.merged.is_none()) { if (update.merged.storage_type() == kRowSparseStorage) { update.merged = NDArray(kRowSparseStorage, update.merged.shape(), update.merged.ctx(), true, mshadow::kFloat32); } else { update.merged = NDArray(update.merged.shape(), update.merged.ctx(), false, mshadow::kFloat32); } } CHECK(update.request.size() == 0) << ps::MyRank() << "Multiprecision mode can not be set while pushes are underway." << "Please set optimizer before pushing keys." 
<< key << " " << update.request.size(); CopyFromTo(stored, stored_realt); } } for (auto const &stored_realt_entry : store_realt_) { stored_realt_entry.second.WaitToRead(); } } void ProcessServerProfilerCommands(KVStoreServerProfilerCommand type, const std::string& body) { switch (type) { case KVStoreServerProfilerCommand::kSetConfig: SetProfilerConfig(body.substr(0, body.size() - 1)); break; case KVStoreServerProfilerCommand::kState: MXSetProfilerState(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kPause: MXProfilePause(static_cast<int>(body.front() - '0')); break; case KVStoreServerProfilerCommand::kDump: MXDumpProfile(static_cast<int>(body.front() - '0')); break; } } void SetProfilerConfig(std::string params_str) { std::vector<std::string> elems; mxnet::kvstore::split(params_str, ',', std::back_inserter(elems)); std::vector<const char*> ckeys; std::vector<const char*> cvals; ckeys.reserve(elems.size()); cvals.reserve(elems.size()); for (size_t i=0; i < elems.size(); i++) { std::vector<std::string> parts; mxnet::kvstore::split(elems[i], ':', std::back_inserter(parts)); CHECK_EQ(parts.size(), 2) << "Improper profiler config passed from worker"; CHECK(!parts[0].empty()) << "ProfilerConfig parameter is empty"; CHECK(!parts[1].empty()) << "ProfilerConfig value is empty for parameter "<< parts[0]; if (parts[0] == "filename") { parts[1] = "rank" + std::to_string(ps::MyRank()) + "_" + parts[1]; } char* ckey = new char[parts[0].length() + 1]; std::snprintf(ckey, parts[0].length() + 1, "%s", parts[0].c_str()); ckeys.push_back(ckey); char* cval = new char[parts[1].length() + 1]; std::snprintf(cval, parts[1].length() + 1, "%s", parts[1].c_str()); cvals.push_back(cval); } MXSetProfilerConfig(elems.size(), &ckeys[0], &cvals[0]); for (size_t i=0; i < ckeys.size(); i++) { delete[] ckeys[i]; delete[] cvals[i]; } } void DataHandleEx(const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { DataHandleType type = 
DepairDataHandleType(req_meta.cmd); switch (type.requestType) { case RequestType::kRowSparsePushPull: DataHandleRowSparse(type, req_meta, req_data, server); break; case RequestType::kCompressedPushPull: DataHandleCompressed(type, req_meta, req_data, server); break; case RequestType::kDefaultPushPull: DataHandleDefault(type, req_meta, req_data, server); break; } } inline bool has_multi_precision_copy(const DataHandleType type) { return multi_precision_ && type.dtype != mshadow::kFloat32; } inline void ApplyUpdates(const DataHandleType type, const int key, const ps::KVPairs<char>& req_data, UpdateBuf *update_buf, ps::KVServer<char>* server) { if (!sync_mode_ || update_buf->request.size() == (size_t) ps::NumWorkers()) { // let the main thread to execute updater_, which is necessary for python auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; auto& update = sync_mode_ ? update_buf->merged : update_buf->temp_array; if (updater_) { exec_.Exec([this, key, &update, &stored](){ CHECK(updater_); updater_(key, update, &stored); }); } else { CHECK(sync_mode_) << "Updater needs to be set for async mode"; // if no updater, just copy CopyFromTo(update_buf->merged, &stored); } if (log_verbose_) { LOG(INFO) << "sent response to " << update_buf->request.size() << " workers"; } for (const auto& req : update_buf->request) { /** * Request can be for either push, pull or pushpull * If pull flag is set, respond immediately with the updated values * Otherwise, only send the notification */ if (req.pull) { DefaultStorageResponse(type, key, req, req_data, server); } else { server->Response(req); } } update_buf->request.clear(); if (has_multi_precision_copy(type)) CopyFromTo(stored, store_[key]); stored.WaitToRead(); } else { update_buf->merged.WaitToRead(); } } void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices, const int64_t master_key, const int64_t num_rows) { indices[0] = 0; for (int64_t i = 1; i <= num_rows; i++) { int key = 
DecodeKey(keys[i]); auto row_id = key - master_key; indices[i - 1] = row_id; } } void AccumulateRowSparseGrads(const DataHandleType type, const NDArray& recved, UpdateBuf* updateBuf) { NDArray out(kRowSparseStorage, updateBuf->merged.shape(), Context(), true, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) CopyFromTo(recved, updateBuf->temp_array); const NDArray& to_merge = has_multi_precision_copy(type) ? updateBuf->temp_array : recved; // accumulate row_sparse gradients using namespace mshadow; Engine::Get()->PushAsync( [to_merge, updateBuf, out](RunContext ctx, Engine::CallbackOnComplete on_complete) { op::ElemwiseBinaryOp::ComputeEx<cpu, op::mshadow_op::plus>( {}, {}, {to_merge, updateBuf->merged}, {kWriteTo}, {out}); on_complete(); }, to_merge.ctx(), {to_merge.var(), updateBuf->merged.var()}, {out.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); CopyFromTo(out, &(updateBuf->merged), 0); updateBuf->merged.WaitToRead(); } void RowSparsePullResponse(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { if (log_verbose_) LOG(INFO) << "pull: " << master_key; ps::KVPairs<char> response; if (num_rows == 0) { std::vector<int> lens(req_data.keys.size(), 0); response.keys = req_data.keys; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); return; } const NDArray& stored = store_[master_key]; if (has_multi_precision_copy(type)) stored.WaitToRead(); CHECK(!stored.is_none()) << "init " << master_key << " first"; auto shape = stored.shape(); auto unit_len = shape.ProdShape(1, shape.ndim()); const int num_bytes = mshadow::mshadow_sizeof(type.dtype); const int unit_size = unit_len * num_bytes; const char* data = static_cast<char *> (stored.data().dptr_); auto len = num_rows * unit_size; // concat values response.vals.resize(len); #pragma omp parallel for for (size_t 
i = 1; i <= num_rows; i++) { int key = DecodeKey(req_data.keys[i]); int64_t row_id = key - master_key; const auto src = data + row_id * unit_size; auto begin = (i - 1) * unit_size; auto end = i * unit_size; response.vals.segment(begin, end).CopyFrom(src, unit_size); } // setup response response.keys = req_data.keys; std::vector<int> lens(req_data.keys.size(), unit_len); lens[0] = 0; response.lens.CopyFrom(lens.begin(), lens.end()); server->Response(req_meta, response); } void InitRowSparseStored(const DataHandleType type, const int master_key, const size_t num_rows, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { auto& stored = has_multi_precision_copy(type) ? store_realt_[master_key] : store_[master_key]; int dtype = type.dtype; int num_bytes = mshadow::mshadow_sizeof(dtype); auto unit_len = req_data.lens[1] / num_bytes; CHECK_GT(unit_len, 0); size_t ds[] = {num_rows, (size_t) unit_len}; mxnet::TShape dshape(ds, ds + 2); CHECK_EQ(req_data.vals.size(), num_rows * unit_len * num_bytes); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); stored = NDArray(kRowSparseStorage, dshape, Context(), true, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); if (has_multi_precision_copy(type)) { store_[master_key] = NDArray(kRowSparseStorage, dshape, Context(), true, type.dtype); } Engine::Get()->PushAsync( [this, recved, stored, type](RunContext ctx, Engine::CallbackOnComplete on_complete) { NDArray rsp = stored; stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])}); mshadow::Stream<cpu> *s = ctx.get_stream<cpu>(); using namespace mxnet::op; nnvm::dim_t nnr = rsp.shape()[0]; MSHADOW_IDX_TYPE_SWITCH(rsp.aux_type(rowsparse::kIdx), IType, { IType* idx = rsp.aux_data(rowsparse::kIdx).dptr<IType>(); mxnet_op::Kernel<PopulateFullIdxRspKernel, cpu>::Launch(s, nnr, idx); }); TBlob rsp_data = rsp.data(); // copies or casts as appropriate ndarray::Copy<cpu, cpu>(recved.data(), &rsp_data, Context(), Context(), RunContext()); on_complete(); }, recved.ctx(), {recved.var()}, {stored.var()}, FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME); if (has_multi_precision_copy(type)) { CopyFromTo(stored, store_[master_key]); store_[master_key].WaitToRead(); } stored.WaitToRead(); server->Response(req_meta); } void DataHandleRowSparse(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char>& req_data, ps::KVServer<char>* server) { int master_key = DecodeKey(req_data.keys[0]); auto num_rows = req_data.keys.size() - 1; auto& stored = store_[master_key]; if (req_meta.push) { CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty"; CHECK_EQ(req_data.lens[0], 0); if (stored.is_none()) { if (log_verbose_) LOG(INFO) << "initial push: " << master_key; // initialization CHECK_GT(num_rows, 0) << "init with empty data is not supported"; InitRowSparseStored(type, master_key, num_rows, req_meta, req_data, server); return; } else { if (log_verbose_) LOG(INFO) << "push: " << master_key << " " << req_data.keys; auto& updates = update_buf_[master_key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, 
has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(kRowSparseStorage, stored.shape(), Context(), false, mshadow::kFloat32); } if (num_rows == 0) { if (sync_mode_) { if (updates.request.empty()) { // reset to zeros int merged_dtype = has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype; updates.merged = NDArray(kRowSparseStorage, stored.shape(), Context(), true, merged_dtype); } // else nothing to aggregate updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } else { server->Response(req_meta); } } else { auto unit_len = req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype); CHECK_GT(unit_len, 0); // indices std::vector<int64_t> indices(num_rows); DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows); // data TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask); size_t ds[] = {(size_t) num_rows, (size_t) unit_len}; mxnet::TShape dshape(ds, ds + 2); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) // row_sparse NDArray NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0); if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); AccumulateRowSparseGrads(type, recved, &updates); } updates.request.push_back(req_meta); ApplyUpdates(type, master_key, req_data, &updates, server); } } } else { // pull RowSparsePullResponse(type, master_key, num_rows, req_meta, req_data, server); } } void DefaultStorageResponse(const DataHandleType type, const int key, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { ps::KVPairs<char> 
response; const NDArray& stored = store_[key]; CHECK(!stored.is_none()) << "init " << key << " first"; // as server returns when store_realt is ready in this case if (has_multi_precision_copy(type)) stored.WaitToRead(); auto len = stored.shape().Size() * mshadow::mshadow_sizeof(stored.dtype()); response.keys = req_data.keys; response.lens = {len}; // TODO(mli) try to remove this CopyFrom response.vals.CopyFrom(static_cast<const char*>(stored.data().dptr_), len); server->Response(req_meta, response); } void DataHandleCompressed(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { CHECK_EQ(type.dtype, mshadow::kFloat32) << "Gradient compression is currently supported for fp32 only"; if (req_meta.push) { // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished // first for dummy key which represents original size of array, whose len is 0 CHECK_EQ(req_data.keys.size(), (size_t)2); CHECK_EQ(req_data.lens.size(), (size_t)2); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[1]); int original_size = DecodeKey(req_data.keys[0]); int key = DecodeKey(req_data.keys[1]); auto& stored = store_[key]; size_t ds[] = {(size_t)req_data.lens[1] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob(reinterpret_cast<real_t*>(req_data.vals.data()), dshape, cpu::kDevMask); NDArray recved = NDArray(recv_blob, 0); NDArray decomp_buf = decomp_buf_[key]; dshape = mxnet::TShape{(int64_t) original_size}; if (decomp_buf.is_none()) { decomp_buf = NDArray(dshape, Context()); } if (stored.is_none()) { stored = NDArray(dshape, Context()); gradient_compression_->Dequantize(recved, &stored, 0); server->Response(req_meta); stored.WaitToRead(); } else if (sync_mode_) { // synced push auto& merged = update_buf_[key]; if (merged.merged.is_none()) { 
merged.merged = NDArray(dshape, Context()); } if (merged.request.size() == 0) { gradient_compression_->Dequantize(recved, &merged.merged, 0); } else { gradient_compression_->Dequantize(recved, &decomp_buf, 0); merged.merged += decomp_buf; } merged.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &merged, server); } else { // async push gradient_compression_->Dequantize(recved, &decomp_buf, 0); exec_.Exec([this, key, &decomp_buf, &stored]() { CHECK(updater_); updater_(key, decomp_buf, &stored); }); server->Response(req_meta); stored.WaitToRead(); } } else { // pull CHECK_EQ(req_data.keys.size(), (size_t)1); CHECK_EQ(req_data.lens.size(), (size_t)0); int key = DecodeKey(req_data.keys[0]); DefaultStorageResponse(type, key, req_meta, req_data, server); } } void DataHandleDefault(const DataHandleType type, const ps::KVMeta& req_meta, const ps::KVPairs<char> &req_data, ps::KVServer<char>* server) { // do some check CHECK_EQ(req_data.keys.size(), (size_t)1); if (req_meta.push) { CHECK_EQ(req_data.lens.size(), (size_t)1); CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]); } int key = DecodeKey(req_data.keys[0]); auto& stored = has_multi_precision_copy(type) ? store_realt_[key] : store_[key]; // there used several WaitToRead, this is because \a recved's memory // could be deallocated when this function returns. so we need to make sure // the operators with \a NDArray are actually finished if (req_meta.push) { size_t ds[] = {(size_t) req_data.lens[0] / mshadow::mshadow_sizeof(type.dtype)}; mxnet::TShape dshape(ds, ds + 1); TBlob recv_blob; MSHADOW_REAL_TYPE_SWITCH(type.dtype, DType, { recv_blob = TBlob(reinterpret_cast<DType*>(req_data.vals.data()), dshape, cpu::kDevMask); }) NDArray recved = NDArray(recv_blob, 0); if (stored.is_none()) { // initialization stored = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? 
mshadow::kFloat32 : type.dtype); CopyFromTo(recved, &stored, 0); server->Response(req_meta); if (has_multi_precision_copy(type)) { auto& stored_dtype = store_[key]; stored_dtype = NDArray(dshape, Context(), false, type.dtype); CopyFromTo(stored, stored_dtype); stored_dtype.WaitToRead(); } stored.WaitToRead(); } else { auto &updates = update_buf_[key]; if (sync_mode_ && updates.merged.is_none()) { updates.merged = NDArray(dshape, Context(), false, has_multi_precision_copy(type) ? mshadow::kFloat32 : type.dtype); } if (has_multi_precision_copy(type) && updates.temp_array.is_none()) { updates.temp_array = NDArray(dshape, Context(), false, mshadow::kFloat32); } if (updates.request.empty()) { if (sync_mode_) { CopyFromTo(recved, updates.merged); } else { if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); } else { updates.temp_array = recved; } } } else { CHECK(sync_mode_); if (has_multi_precision_copy(type)) { CopyFromTo(recved, updates.temp_array); updates.merged += updates.temp_array; } else { updates.merged += recved; } } updates.request.push_back(req_meta); ApplyUpdates(type, key, req_data, &updates, server); } } else { DefaultStorageResponse(type, key, req_meta, req_data, server); } } int DecodeKey(ps::Key key) { auto kr = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()]; return key - kr.begin(); } /** * \brief user defined mode for push */ bool sync_mode_; KVStore::Controller controller_; KVStore::Updater updater_; /** * \brief store_ contains the value at kvstore for each key */ std::unordered_map<int, NDArray> store_; std::unordered_map<int, NDArray> store_realt_; /** * \brief merge_buf_ is a buffer used if sync_mode is true. It represents * values from different workers being merged. The store will be updated * to this value when values from all workers are pushed into this buffer. 
*/ std::unordered_map<int, UpdateBuf> update_buf_; /** * \brief decomp_buf_ is a buffer into which compressed values are * decompressed before merging to the store. used when compress_!='none' */ std::unordered_map<int, NDArray> decomp_buf_; Executor exec_; ps::KVServer<char>* ps_server_; // whether to LOG verbose information bool log_verbose_; /* * \brief whether to use multi precision mode. * in multi precision mode, all weights are stored as float32. * any gradient received will be cast to float32 before accumulation and updating of weights. */ bool multi_precision_; /** * \brief gradient compression object. * starts with none, used after SetGradientCompression sets the type * currently there is no support for unsetting gradient compression */ std::shared_ptr<kvstore::GradientCompression> gradient_compression_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
SparseDenseProduct.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H

namespace Eigen {

namespace internal {

// A sparse-times-dense (or dense-times-sparse) outer product yields a sparse result.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };

// Primary template for sparse * dense products. Dispatch is driven by:
//  - LhsStorageOrder: storage order of the sparse LHS (RowMajor or ColMajor);
//  - ColPerCol: true when the dense RHS can be processed one column at a time
//    (column-major RHS, or a compile-time vector).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;

// Row-major sparse LHS, column-per-column traversal:
// each result coefficient res(i,c) is an independent dot product, which makes
// the row loop embarrassingly parallel (see the OpenMP branch below).
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;

  // res += alpha * (lhs * rhs), accumulating row by row.
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(static) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // Accumulates res(i,col) += alpha * dot(lhs.row(i), rhs.col(col)) by iterating
  // over the nonzeros of sparse row i only.
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }
};

// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
{
  enum { Defined = 1 };
  typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
};

// Column-major sparse LHS, column-per-column traversal:
// scatter each scaled RHS coefficient along the corresponding sparse LHS column.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;

  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        // The scalar type of alpha*rhs(j,c) is resolved through scalar_product_traits
        // so that mixed scalar types (see the Ref specialization above) keep working.
        typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};

// Row-major sparse LHS, whole-RHS-row traversal (row-major, multi-column RHS):
// accumulate full dense rows of rhs into each result row.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;

  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Res::RowXpr res_j(res.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res_j += (alpha*it.value()) * rhs.row(it.index());
    }
  }
};

// Column-major sparse LHS, whole-RHS-row traversal:
// scatter each RHS row into the result rows indexed by the sparse column j.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;

  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};

// Public entry point: res += alpha * lhs * rhs, dispatching to the proper
// specialization above based on storage orders.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}

} // end namespace internal

namespace internal {

// Product-evaluator glue: sparse * dense (non-outer) products.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

// A sparse-triangular LHS behaves like a plain sparse LHS for this product.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
  : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};

// dense * sparse is computed as (sparse^T * dense^T)^T to reuse the kernels above.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
  : generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape == SparseTriangularShape ? SparseShape : SparseShape, ProductType>
{};

// Evaluator for sparse (x) dense outer products.
// When NeedToTranspose, the roles of LhsT/RhsT are swapped so the sparse
// operand always drives the iteration.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = Dynamic
  };

  // Iterates over the nonzeros of one outer vector of the outer product:
  // wraps the sparse-side iterator and scales every value by the dense-side
  // coefficient m_factor associated with 'outer'.
  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // Dense RHS: the factor is simply coefficient 'outer'.
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }

    // Sparse RHS: the factor is the (only) nonzero of the outer vector if it
    // sits at index 0; otherwise the whole outer vector of the product is empty.
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {}

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};

// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};

// dense * sparse outer product (handled via the transpose-aware base).
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape, typename traits<Lhs>::Scalar, typename traits<Rhs>::Scalar>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEDENSEPRODUCT_H
rom_builder_and_solver.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//                   Raul Bravo
//

#if !defined(KRATOS_ROM_BUILDER_AND_SOLVER)
#define KRATOS_ROM_BUILDER_AND_SOLVER

/* System includes */

/* External includes */

/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "utilities/builtin_timer.h"

/* Application includes */
#include "rom_application_variables.h"

namespace Kratos
{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name  Enum's
///@{

///@}
///@name  Functions
///@{

///@}
///@name Kratos Classes
///@{

// Builder-and-solver that assembles and solves the problem projected onto a
// reduced-order (ROM) basis: the reduced LHS/RHS are dense and of size
// mNumberOfRomModes, and the reduced solution is projected back to the full
// DOF set through the nodal ROM_BASIS matrices. Entities carrying an
// HROM_WEIGHT value trigger hyper-reduced (H-ROM) assembly over a subset of
// elements/conditions only.
template <class TSparseSpace, class TDenseSpace, class TLinearSolver>
class ROMBuilderAndSolver : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:

    //TODO: UPDATE THIS
    /**
     * This struct is used in the component wise calculation only
     * is defined here and is used to declare a member variable in the component wise builder and solver
     * private pointers can only be accessed by means of set and get functions
     * this allows to set and not copy the Element_Variables and Condition_Variables
     * which will be asked and set by another strategy object
     */

    ///@name Type Definitions
    ///@{

    // Class pointer definition
    KRATOS_CLASS_POINTER_DEFINITION(ROMBuilderAndSolver);

    // The size_t types
    typedef std::size_t SizeType;
    typedef std::size_t IndexType;

    /// The definition of the current class
    typedef ROMBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> ClassType;

    /// Definition of the classes from the base class
    typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
    typedef typename BaseType::TSchemeType TSchemeType;
    typedef typename BaseType::DofsArrayType DofsArrayType;
    typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
    typedef typename BaseType::TSystemVectorType TSystemVectorType;
    typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
    typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
    typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
    typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
    typedef typename BaseType::ElementsArrayType ElementsArrayType;
    typedef typename BaseType::ConditionsArrayType ConditionsArrayType;

    /// Additional definitions
    typedef Element::EquationIdVectorType EquationIdVectorType;
    typedef Element::DofsVectorType DofsVectorType;
    typedef boost::numeric::ublas::compressed_matrix<double> CompressedMatrixType;

    /// DoF types definition
    typedef Node<3> NodeType;
    typedef typename NodeType::DofType DofType;
    typedef typename DofType::Pointer DofPointerType;
    typedef typename std::unordered_set<DofPointerType, DofPointerHasher> DofSetType;

    ///@}
    ///@name Life cycle
    ///@{

    // Constructor: validates the input settings against the defaults and
    // stores them through AssignSettings.
    explicit ROMBuilderAndSolver(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
    {
        // Validate and assign defaults
        Parameters this_parameters_copy = ThisParameters.Clone();
        this_parameters_copy = this->ValidateAndAssignParameters(this_parameters_copy, this->GetDefaultParameters());
        this->AssignSettings(this_parameters_copy);
    }

    ~ROMBuilderAndSolver() = default;

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    // Factory method used by the registry to create a new instance.
    typename BaseType::Pointer Create(
        typename TLinearSolver::Pointer pNewLinearSystemSolver,
        Parameters ThisParameters) const override
    {
        return Kratos::make_shared<ClassType>(pNewLinearSystemSolver,ThisParameters);
    }

    // Collects the DOF set from all elements, conditions and constraints.
    // On the first call it additionally classifies entities: those carrying an
    // HROM_WEIGHT are stored in mSelectedElements/mSelectedConditions (H-ROM
    // assembly set) and the rest get a unit weight. Subsequent calls only
    // rebuild the DOF set.
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "Setting up the dofs" << std::endl;
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of threads" << ParallelUtilities::GetNumThreads() << "\n" << std::endl;
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing element loop" << std::endl;

        // Get model part data
        const auto& r_elements_array = rModelPart.Elements();
        const auto& r_conditions_array = rModelPart.Conditions();
        const auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
        const int number_of_elements = static_cast<int>(r_elements_array.size());
        const int number_of_conditions = static_cast<int>(r_conditions_array.size());
        const int number_of_constraints = static_cast<int>(r_constraints_array.size());
        const auto& r_current_process_info = rModelPart.GetProcessInfo();

        DofsVectorType dof_list;
        DofsVectorType second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations

        DofSetType dof_global_set;
        dof_global_set.reserve(number_of_elements * 20); // heuristic reserve; 20 DOFs per element assumed -- TODO confirm

        if (mHromWeightsInitialized == false){
            int number_of_hrom_entities = 0;
            #pragma omp parallel firstprivate(dof_list, second_dof_list) reduction(+:number_of_hrom_entities)
            {
                // We create the temporal set and we reserve some space on them
                DofSetType dofs_tmp_set;
                dofs_tmp_set.reserve(20000);

                // Loop the array of elements
                ElementsArrayType selected_elements_private;
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_elements; ++i) {
                    auto it_elem = r_elements_array.begin() + i;

                    // Detect whether the element has an hyperreduced weight (H-ROM simulation) or not (ROM simulation)
                    if ((it_elem)->Has(HROM_WEIGHT)){
                        selected_elements_private.push_back(*it_elem.base());
                        number_of_hrom_entities++;
                    } else {
                        it_elem->SetValue(HROM_WEIGHT, 1.0);
                    }

                    // Gets list of DOF involved on every element
                    pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                }

                // Loop the array of conditions
                ConditionsArrayType selected_conditions_private;
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_conditions; ++i) {
                    auto it_cond = r_conditions_array.begin() + i;

                    // Detect whether the condition has an hyperreduced weight (H-ROM simulation) or not (ROM simulation)
                    // Note that those conditions used for displaying results are to be ignored as they will not be assembled
                    if (it_cond->Has(HROM_WEIGHT)){
                        selected_conditions_private.push_back(*it_cond.base());
                        number_of_hrom_entities++;
                    } else {
                        it_cond->SetValue(HROM_WEIGHT, 1.0);
                    }

                    // Gets list of DOF involved on every condition
                    pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                }

                // Loop the array of constraints
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_constraints; ++i) {
                    auto it_const = r_constraints_array.begin() + i;

                    // Gets list of Dof involved on every constraint
                    it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                    dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
                }

                #pragma omp critical
                {
                    // Collect the elements and conditions belonging to the H-ROM mesh
                    // These are those that feature a weight and are to be assembled
                    for (auto &r_cond : selected_conditions_private){
                        mSelectedConditions.push_back(&r_cond);
                    }
                    for (auto &r_elem : selected_elements_private){
                        mSelectedElements.push_back(&r_elem);
                    }

                    // We merge all the sets in one thread
                    dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
                }
            }

            // Update H-ROM flags
            if (number_of_hrom_entities) {
                mHromSimulation = true;
            }
            mHromWeightsInitialized = true;
        } else {
            #pragma omp parallel firstprivate(dof_list, second_dof_list)
            {
                // We create the temporal set and we reserve some space on them
                DofSetType dofs_tmp_set;
                dofs_tmp_set.reserve(20000);

                // Loop the array of elements
                ElementsArrayType selected_elements_private;
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_elements; ++i) {
                    auto it_elem = r_elements_array.begin() + i;

                    // Gets list of DOF involved on every element
                    pScheme->GetDofList(*it_elem, dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                }

                // Loop the array of conditions
                ConditionsArrayType selected_conditions_private;
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_conditions; ++i) {
                    auto it_cond = r_conditions_array.begin() + i;

                    // Gets list of DOF involved on every condition
                    pScheme->GetDofList(*it_cond, dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                }

                // Loop the array of constraints
                #pragma omp for schedule(guided, 512) nowait
                for (int i = 0; i < number_of_constraints; ++i) {
                    auto it_const = r_constraints_array.begin() + i;

                    // Gets list of Dof involved on every constraint
                    it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
                    dofs_tmp_set.insert(dof_list.begin(), dof_list.end());
                    dofs_tmp_set.insert(second_dof_list.begin(), second_dof_list.end());
                }

                #pragma omp critical
                {
                    // We merge all the sets in one thread
                    dof_global_set.insert(dofs_tmp_set.begin(), dofs_tmp_set.end());
                }
            }
        }

        // Fill a sorted auxiliary array of with the DOFs set
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Initializing ordered array filling\n" << std::endl;
        DofsArrayType Doftemp;
        Doftemp.reserve(dof_global_set.size());
        for (auto it = dof_global_set.begin(); it != dof_global_set.end(); it++)
        {
            Doftemp.push_back(*it);
        }
        Doftemp.Sort();

        // Update base builder and solver DOFs array and set corresponding flag
        BaseType::GetDofSet() = Doftemp;
        BaseType::SetDofSetIsInitializedFlag(true);

        // Throw an exception if there are no DOFs involved in the analysis
        KRATOS_ERROR_IF(BaseType::GetDofSet().size() == 0) << "No degrees of freedom!" << std::endl;
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2)) << "Number of degrees of freedom:" << BaseType::mDofSet.size() << std::endl;
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished setting up the dofs" << std::endl;

#ifdef KRATOS_DEBUG
        // If reactions are to be calculated, we check if all the dofs have reactions defined
        if (BaseType::GetCalculateReactionsFlag())
        {
            for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction())
                    << "Reaction variable not set for the following : " << std::endl
                    << "Node : " << dof_iterator->Id() << std::endl
                    << "Dof : " << (*dof_iterator) << std::endl
                    << "Not possible to calculate reactions." << std::endl;
            }
        }
#endif

        KRATOS_CATCH("");
    }

    // Assigns consecutive equation ids (0..n-1) to the DOFs in the set and
    // records the full-order equation system size.
    void SetUpSystem(ModelPart &rModelPart) override
    {
        auto& r_dof_set = BaseType::GetDofSet();
        BaseType::mEquationSystemSize = r_dof_set.size();
        int ndofs = static_cast<int>(r_dof_set.size());

        #pragma omp parallel for firstprivate(ndofs)
        for (int i = 0; i < ndofs; i++){
            auto dof_iterator = r_dof_set.begin() + i;
            dof_iterator->SetEquationId(i);
        }
    }

    // Vector ProjectToReducedBasis(
    //     const TSystemVectorType& rX,
    //     ModelPart::NodesContainerType& rNodes
    //     )
    // {
    //     Vector rom_unknowns = ZeroVector(mNumberOfRomModes);
    //     for(const auto& node : rNodes)
    //     {
    //         unsigned int node_aux_id = node.GetValue(AUX_ID);
    //         const auto& nodal_rom_basis = node.GetValue(ROM_BASIS);
    //             for (int i = 0; i < mNumberOfRomModes; ++i) {
    //                 for (int j = 0; j < mNodalDofs; ++j) {
    //                     rom_unknowns[i] += nodal_rom_basis(j, i)*rX(node_aux_id*mNodalDofs + j);
    //                 }
    //             }
    //     }
    //     return rom_unknowns;
    // }

    // Expands the reduced-space vector rRomUnkowns to the full DOF vector rDx:
    // for each DOF, rDx[eq_id] = <row of the nodal ROM_BASIS for that
    // variable, rRomUnkowns>. The nodal basis pointer is cached and only
    // refreshed when the node id changes (DOFs of one node are consecutive
    // in the sorted set -- presumably; relied upon here).
    void ProjectToFineBasis(
        const TSystemVectorType& rRomUnkowns,
        ModelPart& rModelPart,
        TSystemVectorType& rDx)
    {
        auto& r_dof_set = BaseType::GetDofSet();
        const int dofs_number = r_dof_set.size();
        const auto dofs_begin = r_dof_set.begin();

        #pragma omp parallel firstprivate(dofs_begin, dofs_number)
        {
            const Matrix *pcurrent_rom_nodal_basis = nullptr;
            unsigned int old_dof_id;
            #pragma omp for nowait
            for (int k = 0; k < dofs_number; k++) {
                auto dof = dofs_begin + k;
                if (pcurrent_rom_nodal_basis == nullptr) {
                    pcurrent_rom_nodal_basis = &(rModelPart.pGetNode(dof->Id())->GetValue(ROM_BASIS));
                    old_dof_id = dof->Id();
                } else if (dof->Id() != old_dof_id ) {
                    pcurrent_rom_nodal_basis = &(rModelPart.pGetNode(dof->Id())->GetValue(ROM_BASIS));
                    old_dof_id = dof->Id();
                }
                rDx[dof->EquationId()] = inner_prod(row(*pcurrent_rom_nodal_basis, mMapPhi[dof->GetVariable().Key()]), rRomUnkowns);
            }
        }
    }

    // Builds the elemental basis matrix PhiElemental (one row per local DOF)
    // by stacking the matching rows of each node's ROM_BASIS. Rows of fixed
    // DOFs are zeroed so they do not contribute to the reduced system.
    // Assumes rDofs is ordered node by node, matching rGeom -- TODO confirm.
    void GetPhiElemental(
        Matrix &PhiElemental,
        const Element::DofsVectorType& rDofs,
        const Element::GeometryType& rGeom)
    {
        const Matrix *pcurrent_rom_nodal_basis = nullptr;
        int counter = 0;
        for(int k = 0; k < static_cast<int>(rDofs.size()); ++k){
            auto variable_key = rDofs[k]->GetVariable().Key();
            if(k==0) {
                pcurrent_rom_nodal_basis = &(rGeom[counter].GetValue(ROM_BASIS));
            } else if(rDofs[k]->Id() != rDofs[k-1]->Id()) {
                counter++;
                pcurrent_rom_nodal_basis = &(rGeom[counter].GetValue(ROM_BASIS));
            }
            if (rDofs[k]->IsFixed()) {
                noalias(row(PhiElemental, k)) = ZeroVector(PhiElemental.size2());
            } else {
                noalias(row(PhiElemental, k)) = row(*pcurrent_rom_nodal_basis, mMapPhi[variable_key]);
            }
        }
    }

    // Assembles the reduced system Arom * dxrom = brom by projecting every
    // elemental/conditional LHS and RHS with PhiElemental (weighted by
    // HROM_WEIGHT), solves the dense reduced system, and projects the reduced
    // increment back onto the full DOF vector Dx. In an H-ROM run only the
    // pre-selected entities are assembled. Note: A and b are not filled here;
    // only the reduced counterparts are used.
    void BuildAndSolve(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
        // Define a dense matrix to hold the reduced problem
        Matrix Arom = ZeroMatrix(mNumberOfRomModes, mNumberOfRomModes);
        Vector brom = ZeroVector(mNumberOfRomModes);
        TSystemVectorType x(Dx.size());

        const auto forward_projection_timer = BuiltinTimer();
        Vector xrom = ZeroVector(mNumberOfRomModes);
        //this->ProjectToReducedBasis(x, rModelPart.Nodes(),xrom);
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)) << "Project to reduced basis time: " << forward_projection_timer.ElapsedSeconds() << std::endl;

        // Build the system matrix by looping over elements and conditions and assembling to A
        KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;

        // Getting the elements from the model
        // Only selected conditions and elements are used for the calculation in an H-ROM simulation
        const auto el_begin = mHromSimulation ? mSelectedElements.begin() : rModelPart.ElementsBegin();
        const int nelements = mHromSimulation ? mSelectedElements.size() : rModelPart.NumberOfElements();
        const auto cond_begin = mHromSimulation ? mSelectedConditions.begin() : rModelPart.ConditionsBegin();
        const int nconditions = mHromSimulation ? mSelectedConditions.size() : rModelPart.NumberOfConditions();

        // Get ProcessInfo from main model part
        const ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();

        // Contributions to the system
        LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);

        // Vector containing the localization in the system of the different terms
        Element::EquationIdVectorType EquationId;

        // Assemble all entities
        const auto assembling_timer = BuiltinTimer();

        #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId, el_begin, cond_begin)
        {
            Matrix PhiElemental;
            // Thread-local reduced contributions, merged under the critical section below.
            Matrix tempA = ZeroMatrix(mNumberOfRomModes,mNumberOfRomModes);
            Vector tempb = ZeroVector(mNumberOfRomModes);
            Matrix aux;

            #pragma omp for nowait
            for (int k = 0; k < static_cast<int>(nelements); k++) {
                auto it_el = el_begin + k;

                // Detect if the element is active or not. If the user did not make any choice the element is active by default
                bool element_is_active = true;
                if ((it_el)->IsDefined(ACTIVE)) {
                    element_is_active = (it_el)->Is(ACTIVE);
                }

                // Calculate elemental contribution
                if (element_is_active){
                    pScheme->CalculateSystemContributions(*it_el, LHS_Contribution, RHS_Contribution, EquationId, r_current_process_info);
                    Element::DofsVectorType dofs;
                    it_el->GetDofList(dofs, r_current_process_info);
                    const auto &geom = it_el->GetGeometry();
                    if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mNumberOfRomModes) {
                        PhiElemental.resize(dofs.size(), mNumberOfRomModes,false);
                    }
                    if(aux.size1() != dofs.size() || aux.size2() != mNumberOfRomModes) {
                        aux.resize(dofs.size(), mNumberOfRomModes,false);
                    }
                    GetPhiElemental(PhiElemental, dofs, geom);
                    // Galerkin projection: Arom += w * Phi^T * K * Phi, brom += w * Phi^T * f
                    noalias(aux) = prod(LHS_Contribution, PhiElemental);
                    double h_rom_weight = it_el->GetValue(HROM_WEIGHT);
                    noalias(tempA) += prod(trans(PhiElemental), aux) * h_rom_weight;
                    noalias(tempb) += prod(trans(PhiElemental), RHS_Contribution) * h_rom_weight;
                }
            }

            #pragma omp for nowait
            for (int k = 0; k < static_cast<int>(nconditions); k++){
                auto it = cond_begin + k;

                // Detect if the element is active or not. If the user did not make any choice the condition is active by default
                bool condition_is_active = true;
                if ((it)->IsDefined(ACTIVE)) {
                    condition_is_active = (it)->Is(ACTIVE);
                }

                // Calculate condition contribution
                if (condition_is_active) {
                    Condition::DofsVectorType dofs;
                    it->GetDofList(dofs, r_current_process_info);
                    pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, r_current_process_info);
                    const auto &geom = it->GetGeometry();
                    if(PhiElemental.size1() != dofs.size() || PhiElemental.size2() != mNumberOfRomModes) {
                        PhiElemental.resize(dofs.size(), mNumberOfRomModes,false);
                    }
                    if(aux.size1() != dofs.size() || aux.size2() != mNumberOfRomModes) {
                        aux.resize(dofs.size(), mNumberOfRomModes,false);
                    }
                    GetPhiElemental(PhiElemental, dofs, geom);
                    noalias(aux) = prod(LHS_Contribution, PhiElemental);
                    double h_rom_weight = it->GetValue(HROM_WEIGHT);
                    noalias(tempA) += prod(trans(PhiElemental), aux) * h_rom_weight;
                    noalias(tempb) += prod(trans(PhiElemental), RHS_Contribution) * h_rom_weight;
                }
            }

            #pragma omp critical
            {
                noalias(Arom) += tempA;
                noalias(brom) += tempb;
            }
        }

        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)) << "Build time: " << assembling_timer.ElapsedSeconds() << std::endl;
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0)) << "Finished parallel building" << std::endl;

        //solve for the rom unkowns dunk = Arom^-1 * brom
        Vector dxrom(xrom.size());
        const auto solving_timer = BuiltinTimer();
        MathUtils<double>::Solve(Arom, dxrom, brom);
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)) << "Solve reduced system time: " << solving_timer.ElapsedSeconds() << std::endl;

        // //update database
        // noalias(xrom) += dxrom;

        // project reduced solution back to full order model
        const auto backward_projection_timer = BuiltinTimer();
        ProjectToFineBasis(dxrom, rModelPart, Dx);
        KRATOS_INFO_IF("ROMBuilderAndSolver", (this->GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0)) << "Project to fine basis time: " << backward_projection_timer.ElapsedSeconds() << std::endl;
    }

    // Allocates (if needed) and resizes Dx and b to the full equation system
    // size. The system matrix A is created empty but never resized here: the
    // reduced problem is assembled densely in BuildAndSolve instead.
    void ResizeAndInitializeVectors(
        typename TSchemeType::Pointer pScheme,
        TSystemMatrixPointerType &pA,
        TSystemVectorPointerType &pDx,
        TSystemVectorPointerType &pb,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY

        // If not initialized, initalize the system arrays to an empty vector/matrix
        if (!pA) {
            TSystemMatrixPointerType p_new_A = Kratos::make_shared<TSystemMatrixType>(0, 0);
            pA.swap(p_new_A);
        }
        if (!pDx) {
            TSystemVectorPointerType p_new_Dx = Kratos::make_shared<TSystemVectorType>(0);
            pDx.swap(p_new_Dx);
        }
        if (!pb) {
            TSystemVectorPointerType p_new_b = Kratos::make_shared<TSystemVectorType>(0);
            pb.swap(p_new_b);
        }

        TSystemVectorType& r_Dx = *pDx;
        if (r_Dx.size() != BaseType::GetEquationSystemSize()) {
            r_Dx.resize(BaseType::GetEquationSystemSize(), false);
        }

        TSystemVectorType& r_b = *pb;
        if (r_b.size() != BaseType::GetEquationSystemSize()) {
            r_b.resize(BaseType::GetEquationSystemSize(), false);
        }

        KRATOS_CATCH("")
    }

    // Default settings: the list of nodal unknown names defining the rows of
    // the ROM basis, and the number of reduced modes.
    Parameters GetDefaultParameters() const override
    {
        Parameters default_parameters = Parameters(R"(
        {
            "name" : "rom_builder_and_solver",
            "nodal_unknowns" : [],
            "number_of_rom_dofs" : 10
        })");
        default_parameters.AddMissingParameters(BaseType::GetDefaultParameters());

        return default_parameters;
    }

    // Registry name of this builder-and-solver.
    static std::string Name()
    {
        return "rom_builder_and_solver";
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const override
    {
        return "ROMBuilderAndSolver";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream &rOStream) const override
    {
        rOStream << Info();
    }

    /// Print object's data.
    virtual void PrintData(std::ostream &rOStream) const override
    {
        rOStream << Info();
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

protected:

    ///@}
    ///@name Protected static member variables
    ///@{

    ///@}
    ///@name Protected member variables
    ///@{

    SizeType mNodalDofs;                                        // number of nodal unknowns (rows per node in the ROM basis)
    SizeType mNumberOfRomModes;                                 // size of the reduced system
    std::unordered_map<Kratos::VariableData::KeyType,int> mMapPhi; // variable key -> row index in the nodal ROM_BASIS
    ElementsArrayType mSelectedElements;                        // H-ROM elements (those with an HROM_WEIGHT)
    ConditionsArrayType mSelectedConditions;                    // H-ROM conditions (those with an HROM_WEIGHT)
    bool mHromSimulation = false;                               // true once any weighted entity was found
    bool mHromWeightsInitialized = false;                       // guards the one-time weight classification in SetUpDofSet

    ///@}
    ///@name Protected operators
    ///@{

    ///@}
    ///@name Protected operations
    ///@{

    // Reads the validated settings and builds the variable-key -> basis-row map.
    void AssignSettings(const Parameters ThisParameters) override
    {
        BaseType::AssignSettings(ThisParameters);

        // Set member variables
        mNodalDofs = ThisParameters["nodal_unknowns"].size();
        mNumberOfRomModes = ThisParameters["number_of_rom_dofs"].GetInt();

        // Set up a map with key the variable key and value the correct row in ROM basis
        IndexType k = 0;
        for (const auto& r_var_name : ThisParameters["nodal_unknowns"].GetStringArray()) {
            if(KratosComponents<Variable<double>>::Has(r_var_name)) {
                const auto& var = KratosComponents<Variable<double>>::Get(r_var_name);
                mMapPhi[var.Key()] = k++;
            } else {
                KRATOS_ERROR << "Variable \""<< r_var_name << "\" not valid" << std::endl;
            }
        }
    }

    ///@}
    ///@name Protected access
    ///@{

    ///@}
    ///@name Protected inquiry
    ///@{

    ///@}
    ///@name Protected life cycle
    ///@{

    ///@}

}; /* Class ROMBuilderAndSolver */

///@}
///@name Type Definitions
///@{

///@}

} /* namespace Kratos.*/

#endif /* KRATOS_ROM_BUILDER_AND_SOLVER  defined */
GB_unaryop__ainv_int32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This file instantiates the AINV (additive inverse, z = -x) unary operator
// for C of type int32_t computed from A of type uint16_t.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_int32_uint16
// op(A') function: GB_tran__ainv_int32_uint16

// C type: int32_t
// A type: uint16_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
uint16_t

#define GB_CTYPE \
int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator.  Note: aij (uint16_t) is promoted to int before the unary
// minus, so the negation is well defined and the result fits in int32_t.
#define GB_OP(z, x) \
z = -x ;

// casting
#define GB_CASTING(z, x) \
int32_t z = (int32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h)
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = -(int32_t) Ax [p] for all p in [0, anz).  Purely elementwise; the
// work is split statically across nthreads OpenMP threads.
GrB_Info GB_unop__ainv_int32_uint16
(
    int32_t *restrict Cx,
    const uint16_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c; it uses the
// GB_* macros defined above to do the cast and negation per entry.
GrB_Info GB_tran__ainv_int32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
task_untied_threadid2.c
// RUN: %libomp-compile-and-run // REQUIRES: abt // Compilation error after clang11+ // UNSUPPORTED: clang #include "omp_testsuite.h" #include <string.h> #include <stdio.h> int test_task_untied_threadid2(int num_threads) { int i, vals[NUM_TASKS]; ABT_thread abt_threads[NUM_TASKS]; memset(vals, 0, sizeof(vals)); #pragma omp parallel num_threads(num_threads) { #pragma omp master { for (i = 0; i < NUM_TASKS; i++) { #pragma omp task firstprivate(i) untied { ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_threads[i])); // Context switching in OpenMP. #pragma omp taskyield int omp_thread_id2 = omp_get_thread_num(); ABT_thread abt_thread = abt_threads[i]; ABT_thread abt_thread2; ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2)); ABT_bool abt_thread_equal; ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2, &abt_thread_equal)); if (abt_thread_equal == ABT_TRUE) { vals[i] += 1; } // Context switching in Argobots. ABT_EXIT_IF_FAIL(ABT_thread_yield()); int omp_thread_id3 = omp_get_thread_num(); if (omp_thread_id2 == omp_thread_id3) { // Argobots context switch does not change the thread-task mapping. vals[i] += 2; } } } } } for (i = 0; i < NUM_TASKS; i++) { if (vals[i] != 3) { printf("vals[%d] == %d\n", i, vals[i]); return 0; } } return 1; } int main() { int i, num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_task_untied_threadid2(i + 1)) { num_failed++; } } return num_failed; }
GB_unop__log1p_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// This file instantiates the LOG1P unary operator (z = log1pf(x), i.e.
// log(1+x) computed accurately for small x) for float C from float A.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop_apply__log1p_fp32_fp32
// op(A') function: GB_unop_tran__log1p_fp32_fp32

// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log1pf (aij)

#define GB_ATYPE \
float

#define GB_CTYPE \
float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
z = log1pf (x) ;

// casting (identity cast here: A and C are both float)
#define GB_CAST(z, aij) \
float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log1pf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG1P || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = log1pf (Ax [p]) for all p.  Elementwise, so in-place operation
// (Cx aliased to Ax) is safe; work split statically across nthreads.
GrB_Info GB_unop_apply__log1p_fp32_fp32
(
    float *Cx, // Cx and Ax may be aliased
    const float *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        float aij = Ax [p] ;
        float z = aij ;
        Cx [p] = log1pf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c; it applies the
// GB_* macros above to each entry while transposing A into C.
GrB_Info GB_unop_tran__log1p_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
deconvolution_packn_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Transposed convolution (deconvolution) over packn-packed fp16 blobs using
// RISC-V Vector intrinsics.  fp16 storage with fp32 accumulation: products
// are widened via vfwmacc and narrowed back to fp16 only when stored.
static void deconvolution_packn_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    // packn = number of fp16 lanes per pack (vector length in bytes / 2)
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // effective kernel footprint after dilation
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp32 accumulator (widened from fp16 inputs); seeded with bias
                vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle32_v_f32m2(bias_data_ptr + p * packn, vl);
                }

                const __fp16* kptr = (const __fp16*)weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        // map output row i back to the contributing input row:
                        // only positions aligned with the stride contribute
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // packn x packn weight tile: one widening FMA per
                            // input lane against a packn-wide weight vector
                            for (int l = 0; l < packn; l++)
                            {
                                __fp16 val = *sptr++;
                                vfloat16m1_t _w0 = vle16_v_f16m1(kptr + k * packn * packn + packn * l, vl);
                                _sum = vfwmacc_vf_f32m2(_sum, val, _w0, vl);
                            }
                        }
                    }

                    kptr += maxk * packn * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                // narrow fp32 accumulator back to fp16 for storage
                vse16_v_f16m1(outptr + j * packn, vfncvt_f_f_w_f16m1(_sum, vl), vl);
            }

            outptr += outw * packn;
        }
    }
}

// Same transposed convolution, but with fp16 arithmetic throughout ("fp16sa"):
// accumulation stays in fp16 (vfmacc), trading accuracy for speed, and the
// bias is expected pre-converted to fp16.
static void deconvolution_packn_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    const int packn = csrr_vlenb() / 2;
    const word_type vl = vsetvl_e16m1(packn);

    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    const int maxk = kernel_w * kernel_h;

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // fp16 accumulator, seeded with fp16 bias
                vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);

                if (bias_data_ptr)
                {
                    _sum = vle16_v_f16m1(bias_data_ptr + p * packn, vl);
                }

                const __fp16* kptr = (const __fp16*)weight_data_fp16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);

                    for (int y = 0; y < kernel_h; y++)
                    {
                        int sys = (i + y * dilation_h - (kernel_extent_h - 1));
                        if (sys < 0 || sys % stride_h != 0)
                            continue;

                        int sy = sys / stride_h;
                        if (sy >= h)
                            continue;

                        for (int x = 0; x < kernel_w; x++)
                        {
                            int sxs = (j + x * dilation_w - (kernel_extent_w - 1));
                            if (sxs < 0 || sxs % stride_w != 0)
                                continue;

                            int sx = sxs / stride_w;
                            if (sx >= w)
                                continue;

                            const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn;

                            int k = y * kernel_w + x;

                            // non-widening fp16 FMA against the weight tile
                            for (int l = 0; l < packn; l++)
                            {
                                __fp16 val = *sptr++;
                                vfloat16m1_t _w0 = vle16_v_f16m1(kptr + k * packn * packn + packn * l, vl);
                                _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
                            }
                        }
                    }

                    kptr += maxk * packn * packn;
                }

                _sum = activation_ps(_sum, activation_type, activation_params, vl);

                vse16_v_f16m1(outptr + j * packn, _sum, vl);
            }

            outptr += outw * packn;
        }
    }
}
GB_binop__second_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__second_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint8) // A*D function (colscale): GB (_AxD__second_uint8) // D*A function (rowscale): GB (_DxB__second_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__second_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__second_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint8) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: uint8_t // A type: uint8_t // A pattern? 1 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = bij #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ ; // true if values of A are not used #define GB_A_IS_PATTERN \ 1 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = y ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 1 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_SECOND || GxB_NO_UINT8 || GxB_NO_SECOND_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__second_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__second_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__second_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__second_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t 
*restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__second_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__second_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__second_uint8) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__second_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__second_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
gemm_teams.c
/** * gemm.c: This file was adapted from PolyBench/GPU 1.0 test suite * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver. * * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br> * Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br> * Luís Felipe Mattos <ra107822@students.ic.unicamp.br> */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <omp.h> #include "../../common/polybenchUtilFuncts.h" #define GPU 1 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size */ #define NI 1024 #define NJ 1024 #define NK 1024 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 32412.0f #define BETA 2123.0f /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void gemm_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) { int i,j,k; #pragma omp target map(to: A[:NI*NK], B[:NK*NJ]) map(tofrom: C[:NI*NJ]) #pragma omp teams distribute parallel for for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] *= BETA; for (k = 0; k < NK; ++k) { C[i*NJ + j] += ALPHA * A[i*NK + k] * B[k*NJ + j]; } } } } void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *C_OMP) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*j + 1) / NJ; } } for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; C_OMP[i*NJ + j] = ((DATA_TYPE) i*j + 2) / NJ; } } } void compareResults(DATA_TYPE* C, 
DATA_TYPE* C_outputFromGpu) { int i, j, fail; fail = 0; // Compare C1 and C2 for (i=0; i < NI; i++) { for (j=0; j < NJ; j++) { if (percentDiff(C[i*NJ + j], C_outputFromGpu[i*NJ + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } int main(int argc, char *argv[]) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* C_outputFromGpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); C_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); fprintf(stdout, "<< Matrix-multiply C=alpha.A.B+beta.C >>\n"); init(A, B, C, C_outputFromGpu); t_start = rtclock(); gemm_OMP(A, B, C_outputFromGpu); t_end = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); t_start = rtclock(); gemm(A, B, C); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(C, C_outputFromGpu); free(A); free(B); free(C); free(C_outputFromGpu); return 0; }
mandelbrot.c
/* To compile: gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp Or just type: module load gcc make To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads): ./mandelbrot 4096 4096 1 */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include "png_util.h" // Q2a: add include for OpenMP header file here: #include <omp.h> #define MXITER 1000 typedef struct { double r; double i; }complex_t; // return iterations before z leaves mandelbrot set for given c int testpoint(complex_t c){ int iter; complex_t z; double temp; z = c; for(iter=0; iter<MXITER; iter++){ temp = (z.r*z.r) - (z.i*z.i) + c.r; z.i = z.r*z.i*2. + c.i; z.r = temp; if((z.r*z.r+z.i*z.i)>4.0){ return iter; } } return iter; } // perform Mandelbrot iteration on a grid of numbers in the complex plane // record the iteration counts in the count array void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){ int n,m; complex_t c; double dr = (cmax.r-cmin.r)/(Nre-1); double di = (cmax.i-cmin.i)/(Nim-1);; // Q2c: add a compiler directive to split the outer for loop amongst threads here #pragma omp parallel for private(m, c) for(n=0;n<Nim;++n){ for(m=0;m<Nre;++m){ c.r = cmin.r + dr*m; c.i = cmin.i + di*n; count[m+n*Nre] = testpoint(c); } } } int main(int argc, char **argv){ // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ] // usage: ./mandelbrot 4096 4096 1 int Nre = atoi(argv[1]); int Nim = atoi(argv[2]); int Nthreads = atoi(argv[3]); // Q2b: set the number of OpenMP threads to be Nthreads here: omp_set_num_threads(Nthreads); // storage for the iteration counts float *count = (float*) malloc(Nre*Nim*sizeof(float)); // Parameters for a bounding box for "c" that generates an interesting image const float centRe = -.759856, centIm= .125547; const float diam = 0.151579; complex_t cmin; complex_t cmax; cmin.r = centRe - 0.5*diam; cmax.r = centRe + 0.5*diam; cmin.i = centIm - 0.5*diam; 
cmax.i = centIm + 0.5*diam; // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time double start = omp_get_wtime(); // compute mandelbrot set mandelbrot(Nre, Nim, cmin, cmax, count); // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time double end = omp_get_wtime(); // print elapsed time printf("elapsed = %g\n", end-start); // output mandelbrot to png format image FILE *fp = fopen("mandelbrot.png", "w"); write_hot_png(fp, Nre, Nim, count, 0, 80); exit(0); return 0; }
util.h
/* Copyright (c) 2013, Taiga Nomi All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <vector> #include <functional> #include <random> #include <type_traits> #include <limits> #include <cassert> #include <cstdio> #include <cstdarg> #include <string> #include "aligned_allocator.h" #include "nn_error.h" #include "tiny_cnn/config.h" #ifdef CNN_USE_TBB #ifndef NOMINMAX #define NOMINMAX // tbb includes windows.h in tbb/machine/windows_api.h #endif #include <tbb/tbb.h> #include <tbb/task_group.h> #endif #ifndef CNN_USE_OMP #include <thread> #include <future> #endif #define CNN_UNREFERENCED_PARAMETER(x) (void)(x) namespace tiny_cnn { ///< output label(class-index) for classification ///< must be equal to cnn_size_t, because size of last layer is equal to num. of classes typedef cnn_size_t label_t; typedef cnn_size_t layer_size_t; // for backward compatibility typedef std::vector<float_t, aligned_allocator<float_t, 64>> vec_t; enum class net_phase { train, test }; template<typename T> inline typename std::enable_if<std::is_integral<T>::value, T>::type uniform_rand(T min, T max) { // avoid gen(0) for MSVC known issue // https://connect.microsoft.com/VisualStudio/feedback/details/776456 static std::mt19937 gen(1); std::uniform_int_distribution<T> dst(min, max); return dst(gen); } template<typename T> inline typename std::enable_if<std::is_floating_point<T>::value, T>::type uniform_rand(T min, T max) { static std::mt19937 gen(1); std::uniform_real_distribution<T> dst(min, max); return dst(gen); } template<typename T> inline typename std::enable_if<std::is_floating_point<T>::value, T>::type gaussian_rand(T mean, T sigma) { static std::mt19937 gen(1); std::normal_distribution<T> dst(mean, sigma); return dst(gen); } template<typename Container> inline int uniform_idx(const Container& t) { return uniform_rand(0, int(t.size() - 1)); } inline bool bernoulli(float_t p) { return uniform_rand(float_t(0), float_t(1)) <= p; } template<typename Iter> void uniform_rand(Iter begin, Iter end, float_t min, float_t max) { for (Iter it = begin; it != 
end; ++it) *it = uniform_rand(min, max); } template<typename Iter> void gaussian_rand(Iter begin, Iter end, float_t mean, float_t sigma) { for (Iter it = begin; it != end; ++it) *it = gaussian_rand(mean, sigma); } template<typename T> T* reverse_endian(T* p) { std::reverse(reinterpret_cast<char*>(p), reinterpret_cast<char*>(p) + sizeof(T)); return p; } inline bool is_little_endian() { int x = 1; return *(char*) &x != 0; } template<typename T> size_t max_index(const T& vec) { auto begin_iterator = std::begin(vec); return std::max_element(begin_iterator, std::end(vec)) - begin_iterator; } template<typename T, typename U> U rescale(T x, T src_min, T src_max, U dst_min, U dst_max) { U value = static_cast<U>(((x - src_min) * (dst_max - dst_min)) / (src_max - src_min) + dst_min); return std::min(dst_max, std::max(value, dst_min)); } inline void nop() { // do nothing } #ifdef CNN_USE_TBB static tbb::task_scheduler_init tbbScheduler(tbb::task_scheduler_init::automatic);//tbb::task_scheduler_init::deferred); typedef tbb::blocked_range<int> blocked_range; template<typename Func> void parallel_for(int begin, int end, const Func& f, int grainsize) { tbb::parallel_for(blocked_range(begin, end, end - begin > grainsize ? 
grainsize : 1), f); } template<typename Func> void xparallel_for(int begin, int end, const Func& f) { f(blocked_range(begin, end, 100)); } #else struct blocked_range { typedef int const_iterator; blocked_range(int begin, int end) : begin_(begin), end_(end) {} blocked_range(size_t begin, size_t end) : begin_(static_cast<int>(begin)), end_(static_cast<int>(end)) {} const_iterator begin() const { return begin_; } const_iterator end() const { return end_; } private: int begin_; int end_; }; template<typename Func> void xparallel_for(size_t begin, size_t end, const Func& f) { blocked_range r(begin, end); f(r); } #ifdef CNN_USE_OMP template<typename Func> void parallel_for(int begin, int end, const Func& f, int /*grainsize*/) { #pragma omp parallel for for (int i=begin; i<end; ++i) f(blocked_range(i,i+1)); } #else template<typename Func> void parallel_for(int start, int end, const Func &f, int /*grainsize*/) { int nthreads = std::thread::hardware_concurrency(); int blockSize = (end - start) / nthreads; if (blockSize*nthreads < end - start) blockSize++; std::vector<std::future<void>> futures; int blockStart = start; int blockEnd = blockStart + blockSize; if (blockEnd > end) blockEnd = end; for (int i = 0; i < nthreads; i++) { futures.push_back(std::move(std::async(std::launch::async, [blockStart, blockEnd, &f] { f(blocked_range(blockStart, blockEnd)); }))); blockStart += blockSize; blockEnd = blockStart + blockSize; if (blockStart >= end) break; if (blockEnd > end) blockEnd = end; } for (auto &future : futures) future.wait(); } #endif #endif // CNN_USE_TBB template<typename T, typename U> bool value_representation(U const &value) { return static_cast<U>(static_cast<T>(value)) == value; } template<typename T, typename Func> inline void for_(std::true_type, bool parallelize, int begin, T end, Func f, int grainsize = 100){ parallelize = parallelize && value_representation<int>(end); parallelize ? 
parallel_for(begin, static_cast<int>(end), f, grainsize) : xparallel_for(begin, static_cast<int>(end), f); } template<typename T, typename Func> inline void for_(std::false_type, bool parallelize, int begin, T end, Func f, int grainsize = 100){ parallelize ? parallel_for(begin, static_cast<int>(end), f, grainsize) : xparallel_for(begin, end, f); } template<typename T, typename Func> inline void for_(bool parallelize, int begin, T end, Func f, int grainsize = 100) { static_assert(std::is_integral<T>::value, "end must be integral type"); for_(typename std::is_unsigned<T>::type(), parallelize, begin, end, f, grainsize); } template <typename T, typename Func> void for_i(bool parallelize, T size, Func f, int grainsize = 100) { for_(parallelize, 0, size, [&](const blocked_range& r) { #ifdef CNN_USE_OMP #pragma omp parallel for #endif for (int i = r.begin(); i < r.end(); i++) f(i); }, grainsize); } template <typename T, typename Func> void for_i(T size, Func f, int grainsize = 100) { for_i(true, size, f, grainsize); } template <typename T> inline T sqr(T value) { return value*value; } inline bool isfinite(float_t x) { return x == x; } template <typename Container> inline bool has_infinite(const Container& c) { for (auto v : c) if (!isfinite(v)) return true; return false; } template <typename Container> size_t max_size(const Container& c) { typedef typename Container::value_type value_t; return std::max_element(c.begin(), c.end(), [](const value_t& left, const value_t& right) { return left.size() < right.size(); })->size(); } inline std::string format_str(const char *fmt, ...) 
{ static char buf[2048]; #ifdef _MSC_VER #pragma warning(disable:4996) #endif va_list args; va_start(args, fmt); vsnprintf(buf, sizeof(buf), fmt, args); va_end(args); #ifdef _MSC_VER #pragma warning(default:4996) #endif return std::string(buf); } template <typename T> struct index3d { index3d(T width, T height, T depth) { reshape(width, height, depth); } index3d() : width_(0), height_(0), depth_(0) {} void reshape(T width, T height, T depth) { width_ = width; height_ = height; depth_ = depth; if ((long long) width * height * depth > std::numeric_limits<T>::max()) throw nn_error( format_str("error while constructing layer: layer size too large for tiny-cnn\nWidthxHeightxChannels=%dx%dx%d >= max size of [%s](=%d)", width, height, depth, typeid(T).name(), std::numeric_limits<T>::max())); } T get_index(T x, T y, T channel) const { assert(x >= 0 && x < width_); assert(y >= 0 && y < height_); assert(channel >= 0 && channel < depth_); return (height_ * channel + y) * width_ + x; } T area() const { return width_ * height_; } T size() const { return width_ * height_ * depth_; } T width_; T height_; T depth_; }; template <typename T> bool operator == (const index3d<T>& lhs, const index3d<T>& rhs) { return (lhs.width_ == rhs.width_) && (lhs.height_ == rhs.height_) && (lhs.depth_ == rhs.depth_); } template <typename T> bool operator != (const index3d<T>& lhs, const index3d<T>& rhs) { return !(lhs == rhs); } typedef index3d<cnn_size_t> layer_shape_t; template <typename Stream, typename T> Stream& operator << (Stream& s, const index3d<T>& d) { s << d.width_ << "x" << d.height_ << "x" << d.depth_; return s; } // boilerplate to resolve dependent name #define CNN_USE_LAYER_MEMBERS using layer_base::in_size_;\ using layer_base::out_size_; \ using layer_base::parallelize_; \ using layer_base::next_; \ using layer_base::prev_; \ using layer_base::a_; \ using layer_base::output_; \ using layer_base::prev_delta_; \ using layer_base::W_; \ using layer_base::b_; \ using layer_base::dW_; \ 
using layer_base::db_; \ using layer_base::Whessian_; \ using layer_base::bhessian_; \ using layer_base::prev_delta2_; \ using layer<Activation>::h_ void CNN_LOG_VECTOR(const vec_t& vec, const std::string& name); } // namespace tiny_cnn #if defined(_MSC_VER) && (_MSC_VER <= 1800) #define CNN_DEFAULT_MOVE_CONSTRUCTOR_UNAVAILABLE #define CNN_DEFAULT_ASSIGNMENT_OPERATOR_UNAVAILABLE #endif
gs_csr_inspector.h
#pragma once
// gs_csr_inspector.h -- builds the dependence DAG used for level-set
// (wavefront) parallelization of a Gauss-Seidel sweep over a CSR matrix.
//
// NOTE(fix): functions are now `inline` and the header is guarded with
// `#pragma once`; the previous non-inline definitions in an unguarded header
// caused multiple-definition link errors when included from more than one
// translation unit.

#include <vector>
#include <cassert>
#include <set>

// Adds the directed edge v -> w to the dependence graph.
inline void connect(int v, int w, std::vector<std::vector<int>> &DAG){
    DAG[v].push_back( w );
}

/*
 ****** Inspector for level-set parallelization of the Gauss-Seidel CSR
 ****** sweep's outermost loop (adjacency lists stored as vectors).
 *
 * n   : number of rows; DAG must already be sized to n entries
 * Lp  : CSR row-pointer array (size n+1)
 * Li  : CSR column-index array
 * DAG : out-parameter; for each row, the strictly-upper neighbours
 *       (Li > row) are appended first, then the strictly-lower ones
 *       (Li < row). Diagonal entries are skipped.
 *
 * Each thread writes only DAG[In_2] for its own rows, so the parallel
 * loops are race-free.
 */
inline void gs_csr_inspector(int n, int* Lp, int* Li, std::vector<std::vector<int>> &DAG){
    // pass 1: edges to columns above the diagonal
#pragma omp parallel for schedule(auto)
    for(int In_2 = 0; In_2 < n; In_2++){
        for(int In_4 = Lp[In_2]; In_4 < Lp[In_2+1]; In_4++){
            if( In_2 < Li[In_4]){
                int Out_2 = Li[In_4];
                DAG[In_2].push_back(Out_2);
            }
        }
    }
    // pass 2: edges to columns below the diagonal
#pragma omp parallel for schedule(auto)
    for(int In_2 = 0; In_2 < n; In_2++){
        for(int In_4 = Lp[In_2]; In_4 < Lp[In_2+1]; In_4++){
            if( In_2 > Li[In_4]){
                int Out_2 = Li[In_4];
                DAG[In_2].push_back(Out_2);
            }
        }
    }
}

/*
 ****** Same inspector with std::set adjacency: duplicate column indices
 ****** within a row are collapsed and neighbours come out sorted.
 */
inline void gs_csr_inspector(int n, int* Lp, int* Li, std::vector<std::set<int>> &DAG){
    // pass 1: edges to columns above the diagonal
#pragma omp parallel for schedule(auto)
    for(int In_2 = 0; In_2 < n; In_2++){
        for(int In_4 = Lp[In_2]; In_4 < Lp[In_2+1]; In_4++){
            if( In_2 < Li[In_4]){
                int Out_2 = Li[In_4];
                DAG[In_2].insert(Out_2);
            }
        }
    }
    // pass 2: edges to columns below the diagonal
#pragma omp parallel for schedule(auto)
    for(int In_2 = 0; In_2 < n; In_2++){
        for(int In_4 = Lp[In_2]; In_4 < Lp[In_2+1]; In_4++){
            if( In_2 > Li[In_4]){
                int Out_2 = Li[In_4];
                DAG[In_2].insert(Out_2);
            }
        }
    }
}
ast-dump-openmp-teams-distribute-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target #pragma omp teams distribute simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target #pragma omp teams distribute simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target #pragma omp teams distribute simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target #pragma omp teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target #pragma omp teams distribute simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:1, col:34> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | 
| |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | | 
|-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:5:1, col:34> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:4:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:5:1) *const restrict' // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:1, col:34> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:34> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:34> // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' 
cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var 
{{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:12:1, col:34> // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} 
<line:14:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:11:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:12:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar 
{{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:1, col:46> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:20:1, col:46> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 1 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:19:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:20:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:1, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar 
{{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:1, col:46> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, 
col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeSimdDirective {{.*}} <line:28:1, col:46> // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:27:1) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:1> col:1 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // 
CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:28:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} 
<col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1> // CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:1, col:19> // CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:1, col:46> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:1, col:46> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-OMPTeamsDistributeSimdDirective {{.*}} <col:1, col:46> // CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} 
<col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator 
{{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> 
<invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} 
<line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit 
__forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}} // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-OMPTeamsDistributeSimdDirective {{.*}} <line:36:1, col:46> // CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:35, col:45> // CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:44> 'int' // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:44> 'int' 2 // CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 
'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' 
'<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:35:1) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:1> col:1 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} 
<<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix 
'++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:1> col:1 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-simd.c:36:1) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:3> 'int' '-' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | `-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:16, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:16, col:26> 'int' '-' // CHECK-NEXT: | | | | | |-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:5> 'int' '-' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | `-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:18, <invalid sloc>> 'int' '+' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:18, col:28> 'int' '-' // CHECK-NEXT: | | | | |-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
solver.c
// solver.c - CPHIS solver implementation
#include <cphis.h>
#include <solver.h>
#include <conf.h>
#include <aux.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Destroy all per-scale matrices and vectors of one scale. Handles must be
// NULL or valid, which CphisSolverSetupScale guarantees by nulling them first.
// On the finest scale of a non-HSS hierarchy, Ascale aliases the user's system
// matrix (see CphisSolverSetupScale), so it must not be destroyed here.
static CphisError CphisSolverCleanupScale(CphisSolver solver, int scale)
{
  int skipAscale = 0;
  if (
    scale == solver->conf->numScales - 1 &&
    solver->conf->scales[2*scale] == 0
  ) {
    // Finest scale starts at local d.o.f. 0, i.e., Ascale aliases solver->A.
    skipAscale = 1;
  }
  if (!skipAscale && solver->Ascale[scale]) {
    CphisMatDestroy(solver->Ascale[scale]);
  }
  if (solver->Aresidual[scale]) CphisMatDestroy(solver->Aresidual[scale]);
  if (solver->Arhs[scale]) CphisMatDestroy(solver->Arhs[scale]);
  if (solver->r[scale]) CphisVecDestroy(solver->r[scale]);
  if (solver->e[scale]) CphisVecDestroy(solver->e[scale]);
  if (solver->fscale[scale]) CphisVecDestroy(solver->fscale[scale]);
  if (solver->uscale[scale]) CphisVecDestroy(solver->uscale[scale]);
  if (solver->urhs[scale]) CphisVecDestroy(solver->urhs[scale]);
  return CPHIS_SUCCESS;
}

// Create and fill all matrices and vectors needed on one scale (Ascale,
// Aresidual, Arhs, and the work vectors), extract the corresponding blocks
// from the full system matrix, and set up the scale's smoother/solver.
// On any error the scale is cleaned up before returning.
static CphisError CphisSolverSetupScale(CphisSolver solver, int scale)
{
  CphisError err;
  // Make sure that it is always safe to abort.
  solver->Ascale[scale] = NULL;
  solver->Aresidual[scale] = NULL;
  solver->Arhs[scale] = NULL;
  solver->r[scale] = NULL;
  solver->e[scale] = NULL;
  solver->fscale[scale] = NULL;
  solver->uscale[scale] = NULL;
  solver->urhs[scale] = NULL;
  // Prepare creation of matrices and vectors.
  const CphisBackendType type = solver->A->type;
  const CphisIndex numElements = solver->A->numElements;
  const CphisIndex *elements = solver->A->elements;
  // Gather some information about the scale.
  // Number of local d.o.f. in the system
  const int numLocalDOF = solver->conf->numLocalDOF;
  // Number of matrix rows
  const CphisIndex numRows = numElements*numLocalDOF;
  // Number of local d.o.f. that are smoothed on this scale
  const int numLocalDOFScale = solver->conf->scales[2*scale + 1]
                             - solver->conf->scales[2*scale] + 1;
  // Minimum local d.o.f. that is smoothed on this scale
  const int minLocalDOFScale = solver->conf->scales[2*scale];
  // Maximum local d.o.f. that is smoothed on this scale
  const int maxLocalDOFScale = solver->conf->scales[2*scale + 1];
  // Number of local d.o.f. in the residual, i.e., in all lower scales combined
  int numLocalDOFResidual = -1;
  if (scale > 0) {
    numLocalDOFResidual = solver->conf->scales[2*(scale - 1) + 1] + 1;
  }
  // Maximum local d.o.f. in the residual, i.e., in any of the lower scales
  int maxLocalDOFResidual = -1;
  if (scale > 0) {
    maxLocalDOFResidual = solver->conf->scales[2*(scale - 1) + 1];
  }
  // Is the scale of HSS-type, i.e., does it smooth only a subset of its local
  // d.o.f.?
  const int isHSSType = minLocalDOFScale != 0;
  // Is this an error correction scale, i.e., one that starts with an initial
  // guess of zero?
  const int isErrorCorrectionScale = scale < solver->conf->numScales - 1;
  // Number of local d.o.f. in this scale that will affect the residual
  int numLocalDOFScaleToResidual;
  if (isErrorCorrectionScale) {
    // Only the smoothed local d.o.f. affect the residual.
    numLocalDOFScaleToResidual = numLocalDOFScale;
  }
  else {
    // All local d.o.f. affect the residual.
    numLocalDOFScaleToResidual = maxLocalDOFScale + 1;
  }
  // Minimum local d.o.f. that will affect the residual
  int minLocalDOFScaleToResidual;
  if (isErrorCorrectionScale) {
    minLocalDOFScaleToResidual = minLocalDOFScale;
  }
  else {
    minLocalDOFScaleToResidual = 0;
  }
  // If the highest scale includes all lower d.o.f. (as in standard
  // p-multigrid), Ascale[numScales - 1] is equal to the original system matrix,
  // so we do not have to create a copy.
  int skipAscale = 0;
  if (scale == solver->conf->numScales - 1 && !isHSSType) {
    solver->Ascale[scale] = solver->A;
    skipAscale = 1;
  }
  if (!skipAscale) {
    err = CphisMatCreate(
            &solver->Ascale[scale],
            numElements,
            elements,
            numLocalDOFScale,
            numLocalDOFScale,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  // Aresidual, r, and e are only needed on higher scales, and their size is
  // determined by the maximum local d.o.f. in the next lower scale.
  if (scale > 0) {
    err = CphisVecCreate(
            &solver->r[scale],
            numElements,
            elements,
            numLocalDOFResidual,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    err = CphisVecCreate(
            &solver->e[scale],
            numElements,
            elements,
            numLocalDOFResidual,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    err = CphisMatCreate(
            &solver->Aresidual[scale],
            numElements,
            elements,
            numLocalDOFResidual,
            numLocalDOFScaleToResidual,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  // Arhs, fscale, and uscale are only needed on HSS-type scales. Their size is
  // determined by the number of local d.o.f. that are smoothed on the current
  // scale.
  if (isHSSType) {
    err = CphisVecCreate(
            &solver->fscale[scale],
            numElements,
            elements,
            numLocalDOFScale,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    err = CphisVecCreate(
            &solver->uscale[scale],
            numElements,
            elements,
            numLocalDOFScale,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    // urhs holds the d.o.f. below this scale, hence size minLocalDOFScale.
    err = CphisVecCreate(
            &solver->urhs[scale],
            numElements,
            elements,
            minLocalDOFScale,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    err = CphisMatCreate(
            &solver->Arhs[scale],
            numElements,
            elements,
            numLocalDOFScale,
            minLocalDOFScale,
            type,
            NULL
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  // Extract the matrix blocks.
  for (CphisIndex i = 0; i < numRows; i++) {
    const int rowLocalDOF = i%numLocalDOF;
    if (rowLocalDOF > maxLocalDOFScale) {
      // Matrix row belongs to a higher scale, so we skip it.
      continue;
    }
    // Get matrix entries in this row.
    const CphisIndex *cols;
    const CphisScalar *vals;
    CphisIndex numEntries;
    err = CphisMatGetData(
            solver->A,
            i,
            &cols,
            &vals,
            &numEntries
          );
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
    // Check if the row belongs to the current scale and/or to lower scales.
    const int isRowInScale = rowLocalDOF >= minLocalDOFScale;
    const int isRowInResidual = rowLocalDOF <= maxLocalDOFResidual;
    for (CphisIndex j = 0; j < numEntries; j++) {
      const int colLocalDOF = cols[j]%numLocalDOF;
      if (colLocalDOF > maxLocalDOFScale) {
        // Column belongs to a higher scale, so we skip it.
        continue;
      }
      // Check if the row belongs to the current scale.
      const int isColInScale = colLocalDOF >= minLocalDOFScale;
      CphisIndex iBlock, jBlock;
      if (!skipAscale && isRowInScale && isColInScale) {
        // Entry goes to Ascale.
        iBlock = (i/numLocalDOF)*numLocalDOFScale
               + rowLocalDOF
               - minLocalDOFScale;
        jBlock = (cols[j]/numLocalDOF)*numLocalDOFScale
               + colLocalDOF
               - minLocalDOFScale;
        err = CphisMatSet(
                solver->Ascale[scale],
                iBlock,
                jBlock,
                vals[j]
              );
        if (err) {
          CphisSolverCleanupScale(solver, scale);
          CPHISCHECK(err);
        }
      }
      if (isRowInResidual && (!isErrorCorrectionScale || isColInScale)) {
        // Entry goes to Aresidual.
        iBlock = (i/numLocalDOF)*numLocalDOFResidual + rowLocalDOF;
        jBlock = (cols[j]/numLocalDOF)*numLocalDOFScaleToResidual
               + colLocalDOF
               - minLocalDOFScaleToResidual;
        err = CphisMatSet(
                solver->Aresidual[scale],
                iBlock,
                jBlock,
                vals[j]
              );
        if (err) {
          CphisSolverCleanupScale(solver, scale);
          CPHISCHECK(err);
        }
      }
      if (isRowInScale && !isColInScale) {
        // Entry goes to Arhs.
        iBlock = (i/numLocalDOF)*numLocalDOFScale
               + rowLocalDOF
               - minLocalDOFScale;
        jBlock = (cols[j]/numLocalDOF)*minLocalDOFScale + colLocalDOF;
        err = CphisMatSet(
                solver->Arhs[scale],
                iBlock,
                jBlock,
                vals[j]
              );
        if (err) {
          CphisSolverCleanupScale(solver, scale);
          CPHISCHECK(err);
        }
      }
    }
  }
  // Finalize matrices.
  if (!skipAscale) {
    err = CphisMatFinalize(solver->Ascale[scale]);
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  if (scale > 0) {
    err = CphisMatFinalize(solver->Aresidual[scale]);
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  if (isHSSType) {
    err = CphisMatFinalize(solver->Arhs[scale]);
    if (err) {
      CphisSolverCleanupScale(solver, scale);
      CPHISCHECK(err);
    }
  }
  // Set up scale solver.
  err = CphisScaleSolverSetup(
          solver->conf->solvers[scale],
          solver->Ascale[scale]
        );
  if (err) {
    CphisSolverCleanupScale(solver, scale);
    CPHISCHECK(err);
  }
  return CPHIS_SUCCESS;
}

// Allocate a solver object, its per-scale handle arrays, the full residual
// vector, and set up every scale. Uses goto-based cleanup so that any failure
// releases everything allocated so far before returning the error.
CphisError CphisSolverCreate(
             CphisSolver *solver,
             const CphisConf conf,
             const CphisMat A
           )
{
  CphisError err;
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  *solver = malloc(sizeof(struct _CphisSolver));
  if (!(*solver)) {
    CPHISCHECK(CPHIS_FAILED_ALLOC);
  }
  // Make sure that it is always safe to clean up.
  (*solver)->rfull = NULL;
  (*solver)->Ascale = NULL;
  (*solver)->Aresidual = NULL;
  (*solver)->Arhs = NULL;
  (*solver)->r = NULL;
  (*solver)->e = NULL;
  (*solver)->fscale = NULL;
  (*solver)->uscale = NULL;
  (*solver)->urhs = NULL;
  (*solver)->iterCoarseScale = 0;
  err = CphisVecCreate(
          &(*solver)->rfull,
          A->numElements,
          A->elements,
          A->numLocalDOFRange,
          A->type,
          NULL
        );CPHISCHECK(err);
  // Allocate arrays for matrix and vector handles.
  // The only possible errors here are failed allocations.
  err = CPHIS_FAILED_ALLOC;
  (*solver)->Ascale = malloc(conf->numScales*sizeof(CphisMat));
  if (!(*solver)->Ascale) goto cphis_solver_create_cleanup;
  (*solver)->Aresidual = malloc(conf->numScales*sizeof(CphisMat));
  if (!(*solver)->Aresidual) goto cphis_solver_create_cleanup;
  (*solver)->Arhs = malloc(conf->numScales*sizeof(CphisMat));
  if (!(*solver)->Arhs) goto cphis_solver_create_cleanup;
  (*solver)->r = malloc(conf->numScales*sizeof(CphisVec));
  if (!(*solver)->r) goto cphis_solver_create_cleanup;
  (*solver)->e = malloc(conf->numScales*sizeof(CphisVec));
  if (!(*solver)->e) goto cphis_solver_create_cleanup;
  (*solver)->fscale = malloc(conf->numScales*sizeof(CphisVec));
  if (!(*solver)->fscale) goto cphis_solver_create_cleanup;
  (*solver)->uscale = malloc(conf->numScales*sizeof(CphisVec));
  if (!(*solver)->uscale) goto cphis_solver_create_cleanup;
  (*solver)->urhs = malloc(conf->numScales*sizeof(CphisVec));
  if (!(*solver)->urhs) goto cphis_solver_create_cleanup;
  (*solver)->conf = conf;
  (*solver)->A = A;
  // Set up scales.
  for (int s = 0; s < conf->numScales; s++) {
    err = CphisSolverSetupScale(*solver, s);
    if (err) {
      // Clean up and abort.
      for (s--; s >= 0; s--) {
        CphisSolverCleanupScale(*solver, s);
      }
      goto cphis_solver_create_cleanup;
    }
  }
  // Initialize timers.
  memset((*solver)->timers, 0, CPHIS_NUM_TIMERS*sizeof(double));
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  (*solver)->timers[CPHIS_TIMER_SETUP] += tEnd - tStart;
#endif
  return CPHIS_SUCCESS;
cphis_solver_create_cleanup:
  CphisVecDestroy((*solver)->rfull);
  free((*solver)->Ascale);
  free((*solver)->Aresidual);
  free((*solver)->Arhs);
  free((*solver)->r);
  free((*solver)->e);
  free((*solver)->fscale);
  free((*solver)->uscale);
  free((*solver)->urhs);
  free(*solver);
  CPHISCHECK(err);
  return err; // Suppress compiler warning (missing return statement).
}

// Destroy a solver created by CphisSolverCreate.
// NOTE(review): the eight handle arrays (Ascale, Aresidual, Arhs, r, e,
// fscale, uscale, urhs) malloc'd in CphisSolverCreate are not freed here,
// only their contents via CphisSolverCleanupScale — looks like a memory
// leak; confirm and free the arrays before free(solver).
CphisError CphisSolverDestroy(CphisSolver solver)
{
  // Clean up scales.
  for (int s = 0; s < solver->conf->numScales; s++) {
    CphisSolverCleanupScale(solver, s);
  }
  CphisVecDestroy(solver->rfull);
  free(solver);
  return CPHIS_SUCCESS;
}

// Override the configuration's relative residual tolerance.
CphisError CphisSolverSetTolRel(CphisSolver solver, CphisReal tol)
{
  solver->conf->rtol = tol;
  return CPHIS_SUCCESS;
}

// Override the configuration's absolute residual tolerance.
CphisError CphisSolverSetTolAbs(CphisSolver solver, CphisReal tol)
{
  solver->conf->atol = tol;
  return CPHIS_SUCCESS;
}

// Run the (scale 0) coarse solver on f/u, accumulate its iteration count and
// timing, and report convergence at detailed verbosity. A non-converged
// coarse solve is only warned about, not treated as an error.
static CphisError CphisSolverSolveCoarseScale(
                    const CphisSolver solver,
                    const CphisVec f,
                    const CphisVec u
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#0: Beginning coarse scale solve\n");
  }
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  CphisConvergenceFlag flag;
  CphisReal residual;
  int iter;
  err = CphisScaleSolverSolve(
          solver->conf->solvers[0],
          f,
          u,
          &flag,
          &residual,
          &iter
        );CPHISCHECK(err);
  solver->iterCoarseScale += iter;
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  solver->timers[CPHIS_TIMER_COARSE_SOLVE] += tEnd - tStart;
#endif
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf(
      "#0: Finished coarse scale solve\n (residual = %e, %d iter.)\n",
      residual,
      iter
    );
    if (flag != CPHIS_CONVERGED) {
      CphisPrintf("#0: The coarse scale solver did not converge!\n");
    }
  }
  return CPHIS_SUCCESS;
}

// Smooth an HSS-type scale: split u into smoothed (uscale) and unsmoothed
// (urhs) parts, fold Arhs*urhs into the scale right-hand side fscale, apply
// the scale solver to the reduced system, and write uscale back into u.
// If isZeroInitialGuess is set, the split and the Arhs product are skipped
// because urhs would be all zero.
static CphisError CphisSolverSmoothHSS(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale,
                    int isZeroInitialGuess
                  )
{
  CphisError err;
  CphisScalar *fData, *fscaleData, *uData, *uscaleData, *urhsData;
  err = CphisVecGetData(f, &fData);CPHISCHECK(err);
  err = CphisVecGetData(solver->fscale[scale], &fscaleData);CPHISCHECK(err);
  err = CphisVecGetData(u, &uData);CPHISCHECK(err);
  err = CphisVecGetData(solver->uscale[scale], &uscaleData);CPHISCHECK(err);
  err = CphisVecGetData(solver->urhs[scale], &urhsData);CPHISCHECK(err);
  CphisIndex numElements;
  err = CphisVecGetNumElements(f, &numElements);CPHISCHECK(err);
  int numLocalDOFfu, numLocalDOFfuscale, numLocalDOFurhs;
  err = CphisVecGetNumLocalDOF(f, &numLocalDOFfu);CPHISCHECK(err);
  err = CphisVecGetNumLocalDOF(
          solver->fscale[scale],
          &numLocalDOFfuscale
        );CPHISCHECK(err);
  err = CphisVecGetNumLocalDOF(
          solver->urhs[scale],
          &numLocalDOFurhs
        );CPHISCHECK(err);
  if (!isZeroInitialGuess) {
    // We need to move the d.o.f. that are not smoothed on this scale to the
    // right-hand side.
    // First, split u into uscale and urhs.
    #pragma omp parallel for
    for (CphisIndex k = 0; k < numElements; k++) {
      for (int l = 0; l < numLocalDOFfu; l++) {
        const CphisIndex row = k*numLocalDOFfu + l;
        if (l < numLocalDOFurhs) {
          // Entry goes to urhs.
          urhsData[k*numLocalDOFurhs + l] = uData[row];
        }
        else {
          // Entry goes to uscale.
          uscaleData[k*numLocalDOFfuscale + l - numLocalDOFurhs] = uData[row];
        }
      }
    }
    // Compute right-hand side.
    err = CphisMatVec(
            solver->Arhs[scale],
            solver->urhs[scale],
            solver->fscale[scale]
          );CPHISCHECK(err);
    // fscale := f (smoothed part) - Arhs*urhs.
    for (CphisIndex k = 0; k < numElements; k++) {
      for (int l = numLocalDOFurhs; l < numLocalDOFfu; l++) {
        fscaleData[k*numLocalDOFfuscale + l - numLocalDOFurhs]
          = fData[k*numLocalDOFfu + l]
          - fscaleData[k*numLocalDOFfuscale + l - numLocalDOFurhs];
      }
    }
  }
  else {
    err = CphisVecSetAll(solver->uscale[scale], 0.0);CPHISCHECK(err);
    // Get fscale.
    for (CphisIndex k = 0; k < numElements; k++) {
      for (int l = numLocalDOFurhs; l < numLocalDOFfu; l++) {
        fscaleData[k*numLocalDOFfuscale + l - numLocalDOFurhs]
          = fData[k*numLocalDOFfu + l];
      }
    }
  }
  // Apply the smoother to the smaller system.
  err = CphisScaleSolverSolve(
          solver->conf->solvers[scale],
          solver->fscale[scale],
          solver->uscale[scale],
          NULL,
          NULL,
          NULL
        );CPHISCHECK(err);
  // Use values from uscale to update u.
  #pragma omp parallel for
  for (CphisIndex k = 0; k < numElements; k++) {
    for (int l = numLocalDOFurhs; l < numLocalDOFfu; l++) {
      uData[k*numLocalDOFfu + l]
        = uscaleData[k*numLocalDOFfuscale + l - numLocalDOFurhs];
    }
  }
  return CPHIS_SUCCESS;
}

// Presmoothing step of a cycle: run nu1 smoother iterations on this scale.
// HSS-type scales go through CphisSolverSmoothHSS; error correction scales
// may start from a zero initial guess there.
static CphisError CphisSolverPresmooth(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->nu1 == 0) return CPHIS_SUCCESS;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Presmoothing\n", scale);
  }
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  // Prepare smoother.
  err = CphisScaleSolverSetTol(
          solver->conf->solvers[scale],
          0.0
        );CPHISCHECK(err);
  err = CphisScaleSolverSetMaxIter(
          solver->conf->solvers[scale],
          solver->conf->nu1
        );CPHISCHECK(err);
  const int isHSSType = solver->conf->scales[2*scale] != 0;
  if (isHSSType) {
    const int isErrorCorrectionScale = scale < solver->conf->numScales - 1;
    err = CphisSolverSmoothHSS(
            solver,
            f,
            u,
            scale,
            isErrorCorrectionScale
          );CPHISCHECK(err);
  }
  else {
    // On standard p-multigrid scales, all we have to do is call the smoother.
    err = CphisScaleSolverSolve(
            solver->conf->solvers[scale],
            f,
            u,
            NULL,
            NULL,
            NULL
          );CPHISCHECK(err);
  }
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  solver->timers[CPHIS_TIMER_PRESMOOTH] += tEnd - tStart;
#endif
  return CPHIS_SUCCESS;
}

// Compute r[scale] = f - Aresidual*u restricted to the lower scales.
// On HSS-type error correction scales only the smoothed part (uscale)
// contributes; otherwise the full u enters the product.
static CphisError CphisSolverComputeRestrictedResidual(
                    const CphisSolver solver,
                    const CphisVec f,
                    const CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Computing restricted residual\n", scale);
  }
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  const int isHSSType = solver->conf->scales[2*scale] != 0;
  const int isErrorCorrectionScale = scale < solver->conf->numScales - 1;
  if (isHSSType && isErrorCorrectionScale) {
    // Compute residual directly from uscale.
    err = CphisMatVec(
            solver->Aresidual[scale],
            solver->uscale[scale],
            solver->r[scale]
          );CPHISCHECK(err);
  }
  else {
    // We need to incorporate all of u in the residual computation.
    err = CphisMatVec(
            solver->Aresidual[scale],
            u,
            solver->r[scale]
          );CPHISCHECK(err);
  }
  CphisScalar *rData, *fData;
  err = CphisVecGetData(solver->r[scale], &rData);CPHISCHECK(err);
  err = CphisVecGetData(f, &fData);CPHISCHECK(err);
  CphisIndex numElements;
  err = CphisVecGetNumElements(f, &numElements);CPHISCHECK(err);
  int numLocalDOFr, numLocalDOFf;
  err = CphisVecGetNumLocalDOF(solver->r[scale], &numLocalDOFr);CPHISCHECK(err);
  err = CphisVecGetNumLocalDOF(f, &numLocalDOFf);CPHISCHECK(err);
  // r := f - A*u, elementwise over the restricted d.o.f.
  #pragma omp parallel for
  for (CphisIndex k = 0; k < numElements; k++) {
    for (int l = 0; l < numLocalDOFr; l++) {
      rData[k*numLocalDOFr + l] = fData[k*numLocalDOFf + l]
                                - rData[k*numLocalDOFr + l];
    }
  }
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  solver->timers[CPHIS_TIMER_RESIDUAL] += tEnd - tStart;
#endif
  return CPHIS_SUCCESS;
}

// Prolongation step: add the coarse error e[scale] into the leading local
// d.o.f. of u (injection; the remaining d.o.f. of u are untouched).
static CphisError CphisSolverProlongate(
                    const CphisSolver solver,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Prolongation\n", scale);
  }
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  CphisScalar *eData, *uData;
  err = CphisVecGetData(solver->e[scale], &eData);CPHISCHECK(err);
  err = CphisVecGetData(u, &uData);CPHISCHECK(err);
  CphisIndex numElements;
  err = CphisVecGetNumElements(u, &numElements);CPHISCHECK(err);
  int numLocalDOFe, numLocalDOFu;
  err = CphisVecGetNumLocalDOF(solver->e[scale], &numLocalDOFe);CPHISCHECK(err);
  err = CphisVecGetNumLocalDOF(u, &numLocalDOFu);CPHISCHECK(err);
  #pragma omp parallel for
  for (CphisIndex k = 0; k < numElements; k++) {
    for (int l = 0; l < numLocalDOFe; l++) {
      uData[k*numLocalDOFu + l] += eData[k*numLocalDOFe + l];
    }
  }
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  solver->timers[CPHIS_TIMER_PROLONGATION] += tEnd - tStart;
#endif
  return CPHIS_SUCCESS;
}
// Postsmoothing step of a cycle: run nu2 smoother iterations on this scale.
// Unlike presmoothing, the current u is never treated as a zero initial guess
// here (it was just updated by prolongation).
static CphisError CphisSolverPostsmooth(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->nu2 == 0) return CPHIS_SUCCESS;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Postsmoothing\n", scale);
  }
#ifdef _OPENMP
  const double tStart = omp_get_wtime();
#endif
  // Prepare smoother.
  err = CphisScaleSolverSetMaxIter(
          solver->conf->solvers[scale],
          solver->conf->nu2
        );CPHISCHECK(err);
  const int isHSSType = solver->conf->scales[2*scale] != 0;
  if (isHSSType) {
    err = CphisSolverSmoothHSS(solver, f, u, scale, 0);CPHISCHECK(err);
  }
  else {
    // On standard p-multigrid scales, all we have to do is call the smoother.
    err = CphisScaleSolverSolve(
            solver->conf->solvers[scale],
            f,
            u,
            NULL,
            NULL,
            NULL
          );CPHISCHECK(err);
  }
#ifdef _OPENMP
  const double tEnd = omp_get_wtime();
  solver->timers[CPHIS_TIMER_POSTSMOOTH] += tEnd - tStart;
#endif
  return CPHIS_SUCCESS;
}

// One V-cycle on the given scale: presmooth, restrict the residual, recurse
// once on the next lower scale (with zero initial error guess), prolongate
// the correction, postsmooth. Scale 0 is solved directly.
static CphisError CphisSolverCycleV(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Entering\n", scale);
  }
  if (scale == 0) {
    err = CphisSolverSolveCoarseScale(solver, f, u);CPHISCHECK(err);
    if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
      CphisPrintf("#0: Leaving\n");
    }
    return CPHIS_SUCCESS;
  }
  err = CphisSolverPresmooth(solver, f, u, scale);CPHISCHECK(err);
  err = CphisSolverComputeRestrictedResidual(
          solver,
          f,
          u,
          scale
        );CPHISCHECK(err);
  // A V-cycle is just a simple recursion.
  err = CphisVecSetAll(solver->e[scale], 0.0);CPHISCHECK(err);
  err = CphisSolverCycleV(
          solver,
          solver->r[scale],
          solver->e[scale],
          scale - 1
        );CPHISCHECK(err);
  err = CphisSolverProlongate(solver, u, scale);CPHISCHECK(err);
  err = CphisSolverPostsmooth(solver, f, u, scale);CPHISCHECK(err);
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Leaving\n", scale);
  }
  return CPHIS_SUCCESS;
}

// One W-cycle on the given scale: like the V-cycle, but the lower scale is
// visited twice before prolongation. Scale 0 is solved directly.
static CphisError CphisSolverCycleW(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Entering\n", scale);
  }
  if (scale == 0) {
    err = CphisSolverSolveCoarseScale(solver, f, u);CPHISCHECK(err);
    if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
      CphisPrintf("#0: Leaving\n");
    }
    return CPHIS_SUCCESS;
  }
  err = CphisSolverPresmooth(solver, f, u, scale);CPHISCHECK(err);
  err = CphisSolverComputeRestrictedResidual(
          solver,
          f,
          u,
          scale
        );CPHISCHECK(err);
  // A W-cycle recurses twice.
  err = CphisVecSetAll(solver->e[scale], 0.0);CPHISCHECK(err);
  err = CphisSolverCycleW(
          solver,
          solver->r[scale],
          solver->e[scale],
          scale - 1
        );CPHISCHECK(err);
  err = CphisSolverCycleW(
          solver,
          solver->r[scale],
          solver->e[scale],
          scale - 1
        );CPHISCHECK(err);
  err = CphisSolverProlongate(solver, u, scale);CPHISCHECK(err);
  err = CphisSolverPostsmooth(solver, f, u, scale);CPHISCHECK(err);
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Leaving\n", scale);
  }
  return CPHIS_SUCCESS;
}

// One F-cycle on the given scale: recurse with an F-cycle first, then refine
// with a V-cycle on the same lower scale. Scale 0 is solved directly.
static CphisError CphisSolverCycleF(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u,
                    int scale
                  )
{
  CphisError err;
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Entering\n", scale);
  }
  if (scale == 0) {
    err = CphisSolverSolveCoarseScale(solver, f, u);CPHISCHECK(err);
    if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
      CphisPrintf("#0: Leaving\n");
    }
    return CPHIS_SUCCESS;
  }
  err = CphisSolverPresmooth(solver, f, u, scale);CPHISCHECK(err);
  err = CphisSolverComputeRestrictedResidual(
          solver,
          f,
          u,
          scale
        );CPHISCHECK(err);
  // Like the W-cycle, the F-cycle recurses twice.
  // However, the second recursion is a simple V-cycle.
  err = CphisVecSetAll(solver->e[scale], 0.0);CPHISCHECK(err);
  err = CphisSolverCycleF(
          solver,
          solver->r[scale],
          solver->e[scale],
          scale - 1
        );CPHISCHECK(err);
  err = CphisSolverCycleV(
          solver,
          solver->r[scale],
          solver->e[scale],
          scale - 1
        );CPHISCHECK(err);
  err = CphisSolverProlongate(solver, u, scale);CPHISCHECK(err);
  err = CphisSolverPostsmooth(solver, f, u, scale);CPHISCHECK(err);
  if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) {
    CphisPrintf("#%d: Leaving\n", scale);
  }
  return CPHIS_SUCCESS;
}

// Dispatch one full multigrid cycle starting at the finest scale, according
// to the configured cycle type (V, W, or F).
static CphisError CphisSolverCycle(
                    const CphisSolver solver,
                    const CphisVec f,
                    CphisVec u
                  )
{
  CphisError err;
  // Enter recursion based on cycle type.
  switch (solver->conf->cycle) {
  case CPHIS_CYCLE_V:
    err = CphisSolverCycleV(
            solver,
            f,
            u,
            solver->conf->numScales - 1
          );CPHISCHECK(err);
    break;
  case CPHIS_CYCLE_W:
    err = CphisSolverCycleW(
            solver,
            f,
            u,
            solver->conf->numScales - 1
          );CPHISCHECK(err);
    break;
  case CPHIS_CYCLE_F:
    err = CphisSolverCycleF(
            solver,
            f,
            u,
            solver->conf->numScales - 1
          );CPHISCHECK(err);
    break;
  default:
    CPHISCHECK(CPHIS_UNKNOWN_TYPE);
    break;
  }
  return CPHIS_SUCCESS;
}

// Print the accumulated timers. Timer data is only collected when OpenMP is
// available (omp_get_wtime is the clock source), so without it we just print
// a notice.
static CphisError CphisSolverPrintTimers(const CphisSolver solver)
{
  CphisPrintf("CPHIS Timers:\n");
#ifdef _OPENMP
  CphisPrintf(
    " Setup: %.3f s\n",
    solver->timers[CPHIS_TIMER_SETUP]
  );
  CphisPrintf(
    " Solve: %.3f s\n",
    solver->timers[CPHIS_TIMER_SOLVER]
  );
  CphisPrintf(
    " Presmooth: %.3f s\n",
    solver->timers[CPHIS_TIMER_PRESMOOTH]
  );
  CphisPrintf(
    " Postsmooth: %.3f s\n",
    solver->timers[CPHIS_TIMER_POSTSMOOTH]
  );
  CphisPrintf(
    " Coarse scale solves: %.3f s\n",
    solver->timers[CPHIS_TIMER_COARSE_SOLVE]
  );
  CphisPrintf(
    " Multigrid residual & restriction: %.3f s\n",
    solver->timers[CPHIS_TIMER_RESIDUAL]
  );
  CphisPrintf(
    " Prolongation: %.3f s\n",
    solver->timers[CPHIS_TIMER_PROLONGATION]
  );
  CphisPrintf(
    " System residual check: %.3f s\n",
    solver->timers[CPHIS_TIMER_SYSTEM_RESIDUAL]
  );
  // Compute and print time delta, i.e., the difference between the total solver
  // time and the sum of the individual timers. This time difference is caused
  // by terminal output etc.
double tDelta = 0.0; for (int i = 0; i < CPHIS_NUM_TIMERS; i++) { if (i == CPHIS_TIMER_SETUP || i == CPHIS_TIMER_SOLVER) continue; tDelta += solver->timers[i]; } tDelta = solver->timers[CPHIS_TIMER_SOLVER] - tDelta; CphisPrintf(" Time delta: %.3f s\n", tDelta); const double wtick = omp_get_wtick(); CphisPrintf(" Timer resolution: %.3e s\n", wtick); #else CphisPrintf(" Timers require OpenMP to be enabled!\n"); #endif return CPHIS_SUCCESS; } CphisError CphisSolverSolve( const CphisSolver solver, const CphisVec b, CphisVec x, CphisConvergenceFlag *flag, CphisReal *residual, int *iter ) { CphisError err; int k = 0; CphisReal rNorm, r0Norm; #ifdef _OPENMP const double tStart = omp_get_wtime(); #endif // Reset number of coarse scale solver iterations. solver->iterCoarseScale = 0; // Compute initial residual norm (only if used as a stopping criterion). const int computeResidual = solver->conf->rtol > 0.0 || solver->conf->atol > 0.0; if (computeResidual) { err = CphisMatVec(solver->A, x, solver->rfull);CPHISCHECK(err); err = CphisVecAXPY(-1.0, b, solver->rfull);CPHISCHECK(err); err = CphisVecNorm2(solver->rfull, &r0Norm);CPHISCHECK(err); } rNorm = r0Norm; #ifdef _OPENMP solver->timers[CPHIS_TIMER_SYSTEM_RESIDUAL] += omp_get_wtime() - tStart; #endif if (solver->conf->verbosity >= CPHIS_VERBOSITY_SUMMARY) { CphisPrintHline(1); CphisPrintf("CPHIS: Beginning solve\n"); CphisPrintHline(1); } while (1) { // Check relative residual norm. if (solver->conf->rtol > 0.0 && rNorm/r0Norm < solver->conf->rtol) { if (flag) *flag = CPHIS_CONVERGED; break; } // Check absolute residual norm. if (solver->conf->atol > 0.0 && rNorm < solver->conf->atol) { if (flag) *flag = CPHIS_CONVERGED; break; } // Check for maximum number of iterations. if (k >= solver->conf->maxIter) { if (flag) *flag = CPHIS_MAX_ITER; break; } if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) { if (k > 0) CphisPrintHline(0); CphisPrintf("Beginning iteration #%d\n", k + 1); } // Perform one full cycle. 
err = CphisSolverCycle(solver, b, x);CPHISCHECK(err); // Compute residual norm (only if used as a stopping criterion. #ifdef _OPENMP const double tStartResidual = omp_get_wtime(); #endif if (computeResidual) { err = CphisMatVec(solver->A, x, solver->rfull);CPHISCHECK(err); err = CphisVecAXPY(-1.0, b, solver->rfull);CPHISCHECK(err); err = CphisVecNorm2(solver->rfull, &rNorm);CPHISCHECK(err); } #ifdef _OPENMP const double tEndResidual = omp_get_wtime(); solver->timers[CPHIS_TIMER_SYSTEM_RESIDUAL] += tEndResidual - tStartResidual; #endif if (solver->conf->verbosity >= CPHIS_VERBOSITY_DETAILED) { if (computeResidual) { CphisPrintf( "End of iteration #%d (residual = %e)\n", k + 1, rNorm/r0Norm ); } else { CphisPrintf("End of iteration #%d (residual not computed)\n", k + 1); } } k++; } #ifdef _OPENMP const double tEnd = omp_get_wtime(); solver->timers[CPHIS_TIMER_SOLVER] += tEnd - tStart; #endif if (solver->conf->verbosity >= CPHIS_VERBOSITY_SUMMARY) { CphisPrintHline(1); } if (solver->conf->verbosity >= CPHIS_VERBOSITY_SUMMARY) { if (computeResidual && rNorm/r0Norm < solver->conf->rtol) { CphisPrintf( "CPHIS: Solver converged to desired rel. tolerance of %.3e!\n", solver->conf->rtol ); } if (computeResidual && rNorm < solver->conf->atol) { CphisPrintf( "CPHIS: Solver converged to desired abs. tolerance of %.3e!\n", solver->conf->atol ); } if (k >= solver->conf->maxIter) { CphisPrintf("CPHIS: Solver reached the maximum number of iterations!\n"); } if (computeResidual) { CphisPrintf( " Rel. residual: %e\n", rNorm/r0Norm ); } else { CphisPrintf(" Rel. 
residual: not computed\n"); } CphisPrintf(" #Iterations: %d\n", k); CphisPrintf( " #Iterations (coarse scale solver): %d\n", solver->iterCoarseScale ); CphisPrintf( " #Iterations (smoothers): %d\n", k*(solver->conf->numScales - 1)*(solver->conf->nu1 + solver->conf->nu2) ); CphisPrintHline(0); err = CphisSolverPrintTimers(solver);CPHISCHECK(err); } if (solver->conf->verbosity >= CPHIS_VERBOSITY_SUMMARY) { CphisPrintHline(1); } if (computeResidual && residual) *residual = rNorm/r0Norm; if (iter) *iter = k; return CPHIS_SUCCESS; }
trsm_x_csc_u_lo_col.c
#include "alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"

/*
 * Triangular solve with a unit lower triangular matrix stored in CSC format:
 * for each of the `columns` dense right-hand sides, compute
 *     y = (L)^{-1} * (alpha * x)
 * where L is A's strictly-lower part with an implicit unit diagonal (entries
 * with row index <= column index are ignored).
 *
 * x and y are column-major dense blocks with leading dimensions ldx and ldy.
 * Right-hand-side columns are independent, so they are processed in parallel.
 *
 * Changes vs. previous version: the row-initialization index now uses
 * ALPHA_INT (not int) to match the rest of the index arithmetic and avoid
 * overflow when ALPHA_INT is 64-bit; the unused local `m` was removed.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSC *A,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    const ALPHA_INT n = A->cols;
    ALPHA_INT num_thread = alpha_get_thread_num();

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        // Initialize this y column as alpha * x.
        for (ALPHA_INT i = 0; i < n; i++)
        {
            alpha_mul(y[index2(out_y_col, i, ldy)], alpha, x[index2(out_y_col, i, ldx)]);
        }
        // Forward substitution (Gaussian elimination): once y[c] is final,
        // subtract its contribution from all strictly-lower entries in column c.
        for (ALPHA_INT c = 0; c < n; ++c)
        {
            for (ALPHA_INT ai = A->cols_start[c]; ai < A->cols_end[c]; ai++)
            {
                ALPHA_INT ar = A->row_indx[ai];
                if (c < ar)
                {
                    alpha_msube(y[index2(out_y_col, ar, ldy)], A->values[ai], y[index2(out_y_col, c, ldy)]);
                }
            }
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
serial_tree_learner.h
#ifndef LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
#define LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_

#include <LightGBM/utils/random.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>

#include "feature_histogram.hpp"
#include "split_info.hpp"
#include "data_partition.hpp"
#include "leaf_splits.hpp"

#include <cstdio>
#include <vector>
#include <random>
#include <cmath>
#include <memory>

namespace LightGBM {

/*!
* \brief Used for learning a tree by single machine
*/
class SerialTreeLearner: public TreeLearner {
public:
  explicit SerialTreeLearner(const TreeConfig* tree_config);

  ~SerialTreeLearner();

  void Init(const Dataset* train_data) override;

  void ResetTrainingData(const Dataset* train_data) override;

  void ResetConfig(const TreeConfig* tree_config) override;

  Tree* Train(const score_t* gradients, const score_t *hessians) override;

  /*! \brief Restrict training to the given subset of data indices (bagging). */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }

  /*!
  * \brief Add the output of each leaf of the last trained tree to the score of
  *        the data assigned to that leaf. No-op for a single-leaf (stump) tree.
  *        Leaves are processed in parallel; each datum belongs to exactly one
  *        leaf, so the writes to out_score do not overlap.
  */
  void AddPredictionToScore(double* out_score) const override {
    if (last_trained_tree_->num_leaves() <= 1) { return; }
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < data_partition_->num_leaves(); ++i) {
      double output = static_cast<double>(last_trained_tree_->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }

protected:
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();

  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(int left_leaf, int right_leaf);

  /*!
  * \brief Find best thresholds for all features, using multi-threading.
  * The result will be stored in smaller_leaf_splits_ and larger_leaf_splits_.
  * This function will be called in FindBestSplit.
  */
  virtual void FindBestThresholds();

  /*!
  * \brief Find best features for leaves from smaller_leaf_splits_ and larger_leaf_splits_.
  * This function will be called after FindBestThresholds.
  */
  virtual void FindBestSplitsForLeaves();

  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);

  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;

  /*! \brief Last trained decision tree */
  const Tree* last_trained_tree_;
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used to generate the set of sampled (used) features */
  Random random_;
  /*! \brief used for sub-feature training; is_feature_used_[i] = false means feature i is not used */
  std::vector<int8_t> is_feature_used_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const TreeConfig* tree_config_;
  /*! \brief number of threads used for training */
  int num_threads_;
  /*! \brief indices into ordered_bins_ for the features that have an ordered bin */
  std::vector<int> ordered_bin_indices_;
};

// Leaf indices < 0 denote "no leaf" and report a count of zero.
inline data_size_t SerialTreeLearner::GetGlobalDataCountInLeaf(int leafIdx) const {
  if (leafIdx >= 0) {
    return data_partition_->leaf_count(leafIdx);
  } else {
    return 0;
  }
}

}  // namespace LightGBM
#endif   // LIGHTGBM_TREELEARNER_SERIAL_TREE_LEARNER_H_
SpatialZeroPadding.c
#include <string.h> #include "../thnets.h" int nnload_SpatialZeroPadding(struct module *mod, struct nnmodule *n) { struct table *t = n->table; mod->type = MT_SpatialZeroPadding; mod->updateOutput = nn_SpatialZeroPadding_updateOutput; struct SpatialZeroPadding *m = &mod->SpatialZeroPadding; m->pad_l = TableGetNumber(t, "pad_l"); m->pad_r = TableGetNumber(t, "pad_r"); m->pad_t = TableGetNumber(t, "pad_t"); m->pad_b = TableGetNumber(t, "pad_b"); return 0; } THFloatTensor *nn_SpatialZeroPadding_updateOutput(struct module *module, THFloatTensor *input) { int idim = input->nDimension; if(idim != 3 && idim != 4) THError("input dimension must be 3 or 4"); int pad_l = module->SpatialZeroPadding.pad_l; int pad_r = module->SpatialZeroPadding.pad_r; int pad_t = module->SpatialZeroPadding.pad_t; int pad_b = module->SpatialZeroPadding.pad_b; int iw = (int)input->size[idim-1]; int ih = (int)input->size[idim-2]; int ow = iw + pad_l + pad_r; int oh = ih + pad_t + pad_b; int ix1 = pad_l < 0 ? -pad_l : 0; int iy1 = pad_t < 0 ? -pad_t : 0; int ix2 = pad_r < 0 ? iw + pad_r : iw; int iy2 = pad_b < 0 ? ih + pad_b : ih; if(idim == 3) THFloatTensor_resize3d(module->output, input->size[0], oh, ow); else THFloatTensor_resize4d(module->output, input->size[0], input->size[1], oh, ow); int batchsize = idim == 4 ? 
(int)input->size[0] : 1; int batch, plane, y; int istride = (int)input->size[idim-2]; #pragma omp parallel for private(batch) for(batch = 0; batch < batchsize; batch++) for(plane = 0; plane < input->size[idim - 3]; plane++) { float *in = THFloatTensor_data(input) + batch * input->stride[0] + plane * input->stride[idim-3]; float *out = THFloatTensor_data(module->output) + batch * module->output->stride[0] + plane * module->output->stride[idim-3]; if(pad_t > 0) memset(out, 0, ow * pad_t * sizeof(*out)); if(pad_b > 0) memset(out + (pad_t + ih) * ow, 0, ow * pad_b * sizeof(*out)); for(y = iy1; y < iy2; y++) { if(pad_l > 0) memset(out + (y + pad_t) * ow, 0, pad_l * sizeof(*out)); if(pad_r > 0) memset(out + (y + pad_t) * ow + pad_l + ow, 0, pad_r * sizeof(*out)); memcpy(out + (y + pad_t) * ow + (pad_l < 0 ? 0 : pad_l), in + y * istride + ix1, (ix2-ix1) * sizeof(*out)); } } return module->output; }
DRB048-firstprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#include <stdio.h>
#include <stdlib.h>
/* Example use of firstprivate() */
#include <omp.h>

/* Add the scalar g to the first n elements of a, in parallel.
 * n and g are firstprivate: each thread starts with its own copy initialized
 * from the original values. Each iteration writes a distinct a[i], so the
 * loop is data-race free (this file is a DataRaceBench "no race" case). */
void foo(int *a,int n,int g)
{
  int i;
#pragma omp parallel for private (i) firstprivate (n,g)
  for (i = 0; i <= n - 1; i += 1) {
    a[i] = a[i] + g;
  }
}
/* Global work array shared by main() and foo(). */
int a[100];

int main()
{
  int i;
  int n = 100;
  /* Initialize a[i] = i in parallel; disjoint writes, no race. */
#pragma omp parallel for private (i)
  for (i = 0; i <= n - 1; i += 1) {
    a[i] = i;
  }
  /* After foo, a[i] == i + 7. */
  foo(a,100,7);
  for (i = 0; i <= n - 1; i += 1) {
    printf("%d\n",a[i]);
  }
  return 0;
}
10.norace5.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> int main() { int x = 0; #pragma omp parallel num_threads(8) { #pragma omp sections lastprivate(x) { { x = 1; } #pragma omp section { x = 2; } } } return x; } // CHECK: Region is Data Race Free. // END
GB_unaryop__minv_uint32_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__minv_uint32_int64
// op(A') function:  GB_tran__minv_uint32_int64

// C type:   uint32_t
// A type:   int64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = GB_IMINV_UNSIGNED (aij, 32)

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IMINV_UNSIGNED (x, 32) ;

// casting
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_MINV || GxB_NO_UINT32 || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__minv_uint32_int64
(
    uint32_t *restrict Cx,
    const int64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Each output entry depends only on the matching input entry, so the
    // loop is embarrassingly parallel.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__minv_uint32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is shared by all generated operators; the
    // macros above specialize GB_unaryop_transpose.c for this type pair.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
convolution_pack4to16.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Direct convolution from a pack-4 input blob to a pack-16 output blob using
// AVX-512 FMA. Each output channel holds 16 interleaved output maps; each
// input channel holds 4 interleaved input maps, so the weight blob stores
// 4 x 16 = 64 floats per kernel tap. top_blob is assumed pre-sized to
// (outw, outh, outch); bias and activation are applied in-place per pixel.
static void convolution_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat pixel offsets (in pack units) of each kernel tap
    // relative to the window's top-left corner, accounting for dilation.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w; // jump to next kernel row
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // Output channels are independent, so parallelize over them.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulate 16 output maps at once; start from the bias.
                __m512 _sum = _mm512_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm512_loadu_ps(bias_data_ptr + p * 16);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // Top-left of the receptive field for output pixel (i, j);
                    // each input pixel carries 4 interleaved values (pack4).
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++)
                    {
                        const float* slptr = sptr + space_ofs[k] * 4;

                        // Broadcast the 4 packed input lanes and multiply each
                        // against its 16-wide weight row.
                        __m512 _val0 = _mm512_set1_ps(slptr[0]);
                        __m512 _val1 = _mm512_set1_ps(slptr[1]);
                        __m512 _val2 = _mm512_set1_ps(slptr[2]);
                        __m512 _val3 = _mm512_set1_ps(slptr[3]);

                        __m512 _w0 = _mm512_load_ps(kptr);
                        __m512 _w1 = _mm512_load_ps(kptr + 16);
                        __m512 _w2 = _mm512_load_ps(kptr + 32);
                        __m512 _w3 = _mm512_load_ps(kptr + 48);

                        _sum = _mm512_fmadd_ps(_val0, _w0, _sum);
                        _sum = _mm512_fmadd_ps(_val1, _w1, _sum);
                        _sum = _mm512_fmadd_ps(_val2, _w2, _sum);
                        _sum = _mm512_fmadd_ps(_val3, _w3, _sum);

                        kptr += 64;
                    }
                }

                _sum = activation_avx512(_sum, activation_type, activation_params);

                _mm512_store_ps(outptr, _sum);
                outptr += 16;
            }
        }
    }
}
wino_conv_kernel_x86.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: haoluo@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <math.h>

#include "wino_conv_kernel_x86.h"

/* Winograd F(4,3): 4x4 output tile from a 6x6 input tile. */
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))

#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))

/* In-place ReLU over `size` floats; when activation > 0 additionally clamp
 * the output from above (ReLU-N style, e.g. activation == 6 gives ReLU6). */
static void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = WINO_MAX(data[i], ( float )0);
        if (activation > 0)
        {
            data[i] = WINO_MIN(data[i], ( float )activation);
        }
    }
}

/* Bytes of scratch needed for the transformed kernel: one ELEM_SIZE block per
 * (output channel, input channel) pair, plus 128 bytes of slack.
 * NOTE(review): the product is computed in unsigned long but returned as int —
 * presumably fine for realistic filter sizes, but would truncate for very
 * large ones; confirm against callers. */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = (unsigned long)output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128;    // caution
}

/* Copy an m x n matrix into the top-left of an m_align x n_align buffer at
 * offset (pad_h, pad_w); if no growth is needed the copy is a single memcpy.
 * The destination padding is assumed to be pre-zeroed by the caller. */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + (i + pad_h) * n_align + pad_w, src + i * n, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        pad_0_align_2D(dst + i * m_align * n_align, src + i * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}

/* Inverse of pad_0_align_2D: extract the m x n interior starting at
 * (pad_h, pad_w) from an m_align x n_align buffer. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (i = 0; i < m; ++i)
    {
        memcpy(dst + i * n, src + (i + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}

// pad 0 in right and down side on 3D
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    int i;
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (i = 0; i < c; ++i)
    {
        delete_0_2D(dst + i * m * n, src + i * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}

void conv3x3s1_winograd43_sse(float*
bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block, float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch, int outw, int outh, int outch, int num_thread) { size_t elemsize = sizeof(float); const float* bias = _bias; // pad to 4n+2, winograd F(4,3) float* bottom_blob_bordered = bottom_blob; int outw_align = (outw + 3) / 4 * 4; int outh_align = (outh + 3) / 4 * 4; w = outw_align + 2; h = outh_align + 2; // BEGIN transform input float* bottom_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 4 * inch * tiles; bottom_blob_tm = transform_input; // BT // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r03 + r04 // 2 = 4 * (r01 - r02) - r03 + r04 // 3 = -2 * r01 - r02 + 2 * r03 + r04 // 4 = 2 * r01 - r02 - 2 * r03 + r04 // 5 = 4 * r01 - 5 * r03 + r05 #if __AVX__ __m256 _1_n = _mm256_set1_ps(-1); __m256 _2_p = _mm256_set1_ps(2); __m256 _2_n = _mm256_set1_ps(-2); __m256 _4_p = _mm256_set1_ps(4); __m256 _4_n = _mm256_set1_ps(-4); __m256 _5_n = _mm256_set1_ps(-5); #endif #pragma omp parallel for num_threads(num_thread) for (int q = 0; q < inch; q++) { const float* img = bottom_blob_bordered + q * w * h; for (int j = 0; j < nColBlocks; j++) { const float* r0 = img + w * j * 4; const float* r1 = r0 + w; const float* r2 = r1 + w; const float* r3 = r2 + w; 
const float* r4 = r3 + w; const float* r5 = r4 + w; for (int i = 0; i < nRowBlocks; i++) { float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q; float* out_tm1 = out_tm0 + tiles_n; float* out_tm2 = out_tm0 + 2 * tiles_n; float* out_tm3 = out_tm0 + 3 * tiles_n; float* out_tm4 = out_tm0 + 4 * tiles_n; float* out_tm5 = out_tm0 + 5 * tiles_n; float* out_tm6 = out_tm0 + 6 * tiles_n; float* out_tm7 = out_tm0 + 7 * tiles_n; float* out_tm8 = out_tm0 + 8 * tiles_n; #if __AVX__ __m256 _d0, _d1, _d2, _d3, _d4, _d5; __m256 _w0, _w1, _w2, _w3, _w4, _w5; __m256 _t0, _t1, _t2, _t3, _t4, _t5; __m256 _n0, _n1, _n2, _n3, _n4, _n5; // load _d0 = _mm256_loadu_ps(r0); _d1 = _mm256_loadu_ps(r1); _d2 = _mm256_loadu_ps(r2); _d3 = _mm256_loadu_ps(r3); _d4 = _mm256_loadu_ps(r4); _d5 = _mm256_loadu_ps(r5); // w = B_t * d _w0 = _mm256_mul_ps(_d0, _4_p); _w0 = _mm256_fmadd_ps(_d2, _5_n, _w0); _w0 = _mm256_add_ps(_w0, _d4); _w1 = _mm256_mul_ps(_d1, _4_n); _w1 = _mm256_fmadd_ps(_d2, _4_n, _w1); _w1 = _mm256_add_ps(_w1, _d3); _w1 = _mm256_add_ps(_w1, _d4); _w2 = _mm256_mul_ps(_d1, _4_p); _w2 = _mm256_fmadd_ps(_d2, _4_n, _w2); _w2 = _mm256_fmadd_ps(_d3, _1_n, _w2); _w2 = _mm256_add_ps(_w2, _d4); _w3 = _mm256_mul_ps(_d1, _2_n); _w3 = _mm256_fmadd_ps(_d2, _1_n, _w3); _w3 = _mm256_fmadd_ps(_d3, _2_p, _w3); _w3 = _mm256_add_ps(_w3, _d4); _w4 = _mm256_mul_ps(_d1, _2_p); _w4 = _mm256_fmadd_ps(_d2, _1_n, _w4); _w4 = _mm256_fmadd_ps(_d3, _2_n, _w4); _w4 = _mm256_add_ps(_w4, _d4); _w5 = _mm256_mul_ps(_d1, _4_p); _w5 = _mm256_fmadd_ps(_d3, _5_n, _w5); _w5 = _mm256_add_ps(_w5, _d5); // transpose d to d_t #ifdef _WIN32 { _t0.m256_f32[0] = _w0.m256_f32[0]; _t1.m256_f32[0] = _w0.m256_f32[1]; _t2.m256_f32[0] = _w0.m256_f32[2]; _t3.m256_f32[0] = _w0.m256_f32[3]; _t4.m256_f32[0] = _w0.m256_f32[4]; _t5.m256_f32[0] = _w0.m256_f32[5]; _t0.m256_f32[1] = _w1.m256_f32[0]; _t1.m256_f32[1] = _w1.m256_f32[1]; _t2.m256_f32[1] = _w1.m256_f32[2]; _t3.m256_f32[1] = _w1.m256_f32[3]; _t4.m256_f32[1] = 
_w1.m256_f32[4]; _t5.m256_f32[1] = _w1.m256_f32[5]; _t0.m256_f32[2] = _w2.m256_f32[0]; _t1.m256_f32[2] = _w2.m256_f32[1]; _t2.m256_f32[2] = _w2.m256_f32[2]; _t3.m256_f32[2] = _w2.m256_f32[3]; _t4.m256_f32[2] = _w2.m256_f32[4]; _t5.m256_f32[2] = _w2.m256_f32[5]; _t0.m256_f32[3] = _w3.m256_f32[0]; _t1.m256_f32[3] = _w3.m256_f32[1]; _t2.m256_f32[3] = _w3.m256_f32[2]; _t3.m256_f32[3] = _w3.m256_f32[3]; _t4.m256_f32[3] = _w3.m256_f32[4]; _t5.m256_f32[3] = _w3.m256_f32[5]; _t0.m256_f32[4] = _w4.m256_f32[0]; _t1.m256_f32[4] = _w4.m256_f32[1]; _t2.m256_f32[4] = _w4.m256_f32[2]; _t3.m256_f32[4] = _w4.m256_f32[3]; _t4.m256_f32[4] = _w4.m256_f32[4]; _t5.m256_f32[4] = _w4.m256_f32[5]; _t0.m256_f32[5] = _w5.m256_f32[0]; _t1.m256_f32[5] = _w5.m256_f32[1]; _t2.m256_f32[5] = _w5.m256_f32[2]; _t3.m256_f32[5] = _w5.m256_f32[3]; _t4.m256_f32[5] = _w5.m256_f32[4]; _t5.m256_f32[5] = _w5.m256_f32[5]; } #else { _t0[0] = _w0[0]; _t1[0] = _w0[1]; _t2[0] = _w0[2]; _t3[0] = _w0[3]; _t4[0] = _w0[4]; _t5[0] = _w0[5]; _t0[1] = _w1[0]; _t1[1] = _w1[1]; _t2[1] = _w1[2]; _t3[1] = _w1[3]; _t4[1] = _w1[4]; _t5[1] = _w1[5]; _t0[2] = _w2[0]; _t1[2] = _w2[1]; _t2[2] = _w2[2]; _t3[2] = _w2[3]; _t4[2] = _w2[4]; _t5[2] = _w2[5]; _t0[3] = _w3[0]; _t1[3] = _w3[1]; _t2[3] = _w3[2]; _t3[3] = _w3[3]; _t4[3] = _w3[4]; _t5[3] = _w3[5]; _t0[4] = _w4[0]; _t1[4] = _w4[1]; _t2[4] = _w4[2]; _t3[4] = _w4[3]; _t4[4] = _w4[4]; _t5[4] = _w4[5]; _t0[5] = _w5[0]; _t1[5] = _w5[1]; _t2[5] = _w5[2]; _t3[5] = _w5[3]; _t4[5] = _w5[4]; _t5[5] = _w5[5]; } #endif // d = B_t * d_t _n0 = _mm256_mul_ps(_t0, _4_p); _n0 = _mm256_fmadd_ps(_t2, _5_n, _n0); _n0 = _mm256_add_ps(_n0, _t4); _n1 = _mm256_mul_ps(_t1, _4_n); _n1 = _mm256_fmadd_ps(_t2, _4_n, _n1); _n1 = _mm256_add_ps(_n1, _t3); _n1 = _mm256_add_ps(_n1, _t4); _n2 = _mm256_mul_ps(_t1, _4_p); _n2 = _mm256_fmadd_ps(_t2, _4_n, _n2); _n2 = _mm256_fmadd_ps(_t3, _1_n, _n2); _n2 = _mm256_add_ps(_n2, _t4); _n3 = _mm256_mul_ps(_t1, _2_n); _n3 = _mm256_fmadd_ps(_t2, _1_n, _n3); _n3 = 
_mm256_fmadd_ps(_t3, _2_p, _n3); _n3 = _mm256_add_ps(_n3, _t4); _n4 = _mm256_mul_ps(_t1, _2_p); _n4 = _mm256_fmadd_ps(_t2, _1_n, _n4); _n4 = _mm256_fmadd_ps(_t3, _2_n, _n4); _n4 = _mm256_add_ps(_n4, _t4); _n5 = _mm256_mul_ps(_t1, _4_p); _n5 = _mm256_fmadd_ps(_t3, _5_n, _n5); _n5 = _mm256_add_ps(_n5, _t5); // save to out_tm float output_n0[8] = {0.f}; _mm256_storeu_ps(output_n0, _n0); float output_n1[8] = {0.f}; _mm256_storeu_ps(output_n1, _n1); float output_n2[8] = {0.f}; _mm256_storeu_ps(output_n2, _n2); float output_n3[8] = {0.f}; _mm256_storeu_ps(output_n3, _n3); float output_n4[8] = {0.f}; _mm256_storeu_ps(output_n4, _n4); float output_n5[8] = {0.f}; _mm256_storeu_ps(output_n5, _n5); out_tm0[0] = output_n0[0]; out_tm0[1] = output_n0[1]; out_tm0[2] = output_n0[2]; out_tm0[3] = output_n0[3]; out_tm1[0] = output_n0[4]; out_tm1[1] = output_n0[5]; out_tm1[2] = output_n1[0]; out_tm1[3] = output_n1[1]; out_tm2[0] = output_n1[2]; out_tm2[1] = output_n1[3]; out_tm2[2] = output_n1[4]; out_tm2[3] = output_n1[5]; out_tm3[0] = output_n2[0]; out_tm3[1] = output_n2[1]; out_tm3[2] = output_n2[2]; out_tm3[3] = output_n2[3]; out_tm4[0] = output_n2[4]; out_tm4[1] = output_n2[5]; out_tm4[2] = output_n3[0]; out_tm4[3] = output_n3[1]; out_tm5[0] = output_n3[2]; out_tm5[1] = output_n3[3]; out_tm5[2] = output_n3[4]; out_tm5[3] = output_n3[5]; out_tm6[0] = output_n4[0]; out_tm6[1] = output_n4[1]; out_tm6[2] = output_n4[2]; out_tm6[3] = output_n4[3]; out_tm7[0] = output_n4[4]; out_tm7[1] = output_n4[5]; out_tm7[2] = output_n5[0]; out_tm7[3] = output_n5[1]; out_tm8[0] = output_n5[2]; out_tm8[1] = output_n5[3]; out_tm8[2] = output_n5[4]; out_tm8[3] = output_n5[5]; #else float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6]; float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6]; float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6]; // load for (int n = 0; n < 6; n++) { d0[n] = r0[n]; d1[n] = r1[n]; d2[n] = r2[n]; d3[n] = r3[n]; d4[n] = r4[n]; d5[n] = r5[n]; } // w = B_t * d for (int n = 0; n < 6; n++) { w0[n] 
= 4 * d0[n] - 5 * d2[n] + d4[n]; w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n]; w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n]; w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n]; w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n]; w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n]; } // transpose d to d_t { t0[0] = w0[0]; t1[0] = w0[1]; t2[0] = w0[2]; t3[0] = w0[3]; t4[0] = w0[4]; t5[0] = w0[5]; t0[1] = w1[0]; t1[1] = w1[1]; t2[1] = w1[2]; t3[1] = w1[3]; t4[1] = w1[4]; t5[1] = w1[5]; t0[2] = w2[0]; t1[2] = w2[1]; t2[2] = w2[2]; t3[2] = w2[3]; t4[2] = w2[4]; t5[2] = w2[5]; t0[3] = w3[0]; t1[3] = w3[1]; t2[3] = w3[2]; t3[3] = w3[3]; t4[3] = w3[4]; t5[3] = w3[5]; t0[4] = w4[0]; t1[4] = w4[1]; t2[4] = w4[2]; t3[4] = w4[3]; t4[4] = w4[4]; t5[4] = w4[5]; t0[5] = w5[0]; t1[5] = w5[1]; t2[5] = w5[2]; t3[5] = w5[3]; t4[5] = w5[4]; t5[5] = w5[5]; } // d = B_t * d_t for (int n = 0; n < 6; n++) { d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n]; d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n]; d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n]; d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n]; d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n]; d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n]; } // save to out_tm { out_tm0[0] = d0[0]; out_tm0[1] = d0[1]; out_tm0[2] = d0[2]; out_tm0[3] = d0[3]; out_tm1[0] = d0[4]; out_tm1[1] = d0[5]; out_tm1[2] = d1[0]; out_tm1[3] = d1[1]; out_tm2[0] = d1[2]; out_tm2[1] = d1[3]; out_tm2[2] = d1[4]; out_tm2[3] = d1[5]; out_tm3[0] = d2[0]; out_tm3[1] = d2[1]; out_tm3[2] = d2[2]; out_tm3[3] = d2[3]; out_tm4[0] = d2[4]; out_tm4[1] = d2[5]; out_tm4[2] = d3[0]; out_tm4[3] = d3[1]; out_tm5[0] = d3[2]; out_tm5[1] = d3[3]; out_tm5[2] = d3[4]; out_tm5[3] = d3[5]; out_tm6[0] = d4[0]; out_tm6[1] = d4[1]; out_tm6[2] = d4[2]; out_tm6[3] = d4[3]; out_tm7[0] = d4[4]; out_tm7[1] = d4[5]; out_tm7[2] = d5[0]; out_tm7[3] = d5[1]; out_tm8[0] = d5[2]; out_tm8[1] = d5[3]; out_tm8[2] = d5[4]; out_tm8[3] = d5[5]; } #endif // __AVX__ r0 += 4; r1 += 4; r2 += 4; r3 += 4; r4 += 4; r5 += 4; } } } } // BEGIN dot float* 
top_blob_tm = NULL; { int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; const int tiles_n = 36 * tiles; top_blob_tm = dot_block; #pragma omp parallel for num_threads(num_thread) for (int r = 0; r < 9; r++) { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; for (int pp = 0; pp < nn_outch; pp++) { int p = pp << 3; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); float* output4_tm = top_blob_tm + tiles_n * (p + 4); float* output5_tm = top_blob_tm + tiles_n * (p + 5); float* output6_tm = top_blob_tm + tiles_n * (p + 6); float* output7_tm = top_blob_tm + tiles_n * (p + 7); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; output4_tm = output4_tm + r * 4; output5_tm = output5_tm + r * 4; output6_tm = output6_tm + r * 4; output7_tm = output7_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); __m128 _sum4 = _mm_broadcast_ss(&zero_val); __m128 _sum5 = _mm_broadcast_ss(&zero_val); __m128 _sum6 = _mm_broadcast_ss(&zero_val); __m128 _sum7 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); __m128 _sum4 = _mm_set1_ps(0.f); __m128 _sum5 = _mm_set1_ps(0.f); __m128 
_sum6 = _mm_set1_ps(0.f); __m128 _sum7 = _mm_set1_ps(0.f); #endif int q = 0; for (; q + 3 < inch; q = q + 4) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _r1 = _mm_loadu_ps(r0 + 4); __m128 _r2 = _mm_loadu_ps(r0 + 8); __m128 _r3 = _mm_loadu_ps(r0 + 12); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r1, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r1, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r1, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r1, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r1, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r1, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r1, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r1, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, 
_k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r2, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r2, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r2, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r2, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r2, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r2, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r2, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r2, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7)); #endif kptr += 32; _k0 = _mm_loadu_ps(kptr); _k1 = _mm_loadu_ps(kptr + 4); _k2 = _mm_loadu_ps(kptr + 8); _k3 = _mm_loadu_ps(kptr + 12); _k4 = _mm_loadu_ps(kptr + 16); _k5 = _mm_loadu_ps(kptr + 20); _k6 = _mm_loadu_ps(kptr + 24); _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r3, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r3, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r3, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r3, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r3, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r3, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r3, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r3, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2)); _sum3 = _mm_add_ps(_sum3, 
_mm_mul_ps(_r3, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7)); #endif kptr += 32; r0 += 16; } for (; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); __m128 _k4 = _mm_loadu_ps(kptr + 16); __m128 _k5 = _mm_loadu_ps(kptr + 20); __m128 _k6 = _mm_loadu_ps(kptr + 24); __m128 _k7 = _mm_loadu_ps(kptr + 28); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); _sum4 = _mm_fmadd_ps(_r0, _k4, _sum4); _sum5 = _mm_fmadd_ps(_r0, _k5, _sum5); _sum6 = _mm_fmadd_ps(_r0, _k6, _sum6); _sum7 = _mm_fmadd_ps(_r0, _k7, _sum7); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); _sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4)); _sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5)); _sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6)); _sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7)); #endif kptr += 32; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); _mm_storeu_ps(output4_tm, _sum4); _mm_storeu_ps(output5_tm, _sum5); _mm_storeu_ps(output6_tm, _sum6); _mm_storeu_ps(output7_tm, _sum7); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; float sum4[4] = {0}; float sum5[4] = {0}; float sum6[4] = {0}; float sum7[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; sum4[n] += 
r0[n] * kptr[n + 16]; sum5[n] += r0[n] * kptr[n + 20]; sum6[n] += r0[n] * kptr[n + 24]; sum7[n] += r0[n] * kptr[n + 28]; } kptr += 32; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; output4_tm[n] = sum4[n]; output5_tm[n] = sum5[n]; output6_tm[n] = sum6[n]; output7_tm[n] = sum7[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; output4_tm += 36; output5_tm += 36; output6_tm += 36; output7_tm += 36; } } nn_outch = (outch - remain_outch_start) >> 2; for (int pp = 0; pp < nn_outch; pp++) { int p = remain_outch_start + pp * 4; float* output0_tm = top_blob_tm + tiles_n * p; float* output1_tm = top_blob_tm + tiles_n * (p + 1); float* output2_tm = top_blob_tm + tiles_n * (p + 2); float* output3_tm = top_blob_tm + tiles_n * (p + 3); output0_tm = output0_tm + r * 4; output1_tm = output1_tm + r * 4; output2_tm = output2_tm + r * 4; output3_tm = output3_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); __m128 _sum1 = _mm_broadcast_ss(&zero_val); __m128 _sum2 = _mm_broadcast_ss(&zero_val); __m128 _sum3 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); __m128 _sum1 = _mm_set1_ps(0.f); __m128 _sum2 = _mm_set1_ps(0.f); __m128 _sum3 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); __m128 _k1 = _mm_loadu_ps(kptr + 4); __m128 _k2 = _mm_loadu_ps(kptr + 8); __m128 _k3 = _mm_loadu_ps(kptr + 12); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); _sum1 = _mm_fmadd_ps(_r0, _k1, _sum1); _sum2 = _mm_fmadd_ps(_r0, _k2, _sum2); _sum3 = _mm_fmadd_ps(_r0, _k3, _sum3); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, 
_k0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3)); #endif kptr += 16; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); _mm_storeu_ps(output1_tm, _sum1); _mm_storeu_ps(output2_tm, _sum2); _mm_storeu_ps(output3_tm, _sum3); #else float sum0[4] = {0}; float sum1[4] = {0}; float sum2[4] = {0}; float sum3[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; sum1[n] += r0[n] * kptr[n + 4]; sum2[n] += r0[n] * kptr[n + 8]; sum3[n] += r0[n] * kptr[n + 12]; } kptr += 16; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; output1_tm[n] = sum1[n]; output2_tm[n] = sum2[n]; output3_tm[n] = sum3[n]; } #endif // __AVX__ output0_tm += 36; output1_tm += 36; output2_tm += 36; output3_tm += 36; } } remain_outch_start += nn_outch << 2; for (int p = remain_outch_start; p < outch; p++) { float* output0_tm = top_blob_tm + 36 * tiles * p; output0_tm = output0_tm + r * 4; for (int i = 0; i < tiles; i++) { const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i); #if __AVX__ || __SSE__ #if __AVX__ float zero_val = 0.f; __m128 _sum0 = _mm_broadcast_ss(&zero_val); #else __m128 _sum0 = _mm_set1_ps(0.f); #endif for (int q = 0; q < inch; q++) { __m128 _r0 = _mm_loadu_ps(r0); __m128 _k0 = _mm_loadu_ps(kptr); #if __AVX__ _sum0 = _mm_fmadd_ps(_r0, _k0, _sum0); #else _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0)); #endif kptr += 4; r0 += 4; } _mm_storeu_ps(output0_tm, _sum0); #else float sum0[4] = {0}; for (int q = 0; q < inch; q++) { for (int n = 0; n < 4; n++) { sum0[n] += r0[n] * kptr[n]; } kptr += 4; r0 += 4; } for (int n = 0; n < 4; n++) { output0_tm[n] = sum0[n]; } #endif // __AVX__ || __SSE__ output0_tm += 36; } } } } // END dot // BEGIN transform output float* top_blob_bordered = NULL; if (outw_align == outw && outh_align == 
outh) { top_blob_bordered = top_blob; } else { top_blob_bordered = output_bordered; } { // AT // const float itm[4][6] = { // {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f}, // {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f}, // {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f} // }; // 0 = r00 + r01 + r02 + r03 + r04 // 1 = r01 - r02 + 2 * (r03 - r04) // 2 = r01 + r02 + 4 * (r03 + r04) // 3 = r01 - r02 + 8 * (r03 - r04) + r05 int w_tm = outw_align / 4 * 6; int h_tm = outh_align / 4 * 6; int nColBlocks = h_tm / 6; // may be the block num in Feathercnn int nRowBlocks = w_tm / 6; const int tiles = nColBlocks * nRowBlocks; #pragma omp parallel for num_threads(num_thread) for (int p = 0; p < outch; p++) { float* out_tile = top_blob_tm + 36 * tiles * p; float* outRow0 = top_blob_bordered + outw_align * outh_align * p; float* outRow1 = outRow0 + outw_align; float* outRow2 = outRow0 + outw_align * 2; float* outRow3 = outRow0 + outw_align * 3; const float bias0 = bias ? bias[p] : 0.f; for (int j = 0; j < nColBlocks; j++) { for (int i = 0; i < nRowBlocks; i++) { // TODO AVX2 float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6]; float w0[6], w1[6], w2[6], w3[6]; float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4]; float o0[4], o1[4], o2[4], o3[4]; // load for (int n = 0; n < 6; n++) { s0[n] = out_tile[n]; s1[n] = out_tile[n + 6]; s2[n] = out_tile[n + 12]; s3[n] = out_tile[n + 18]; s4[n] = out_tile[n + 24]; s5[n] = out_tile[n + 30]; } // w = A_T * W for (int n = 0; n < 6; n++) { w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n]; w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n]; w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n]; w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n]; } // transpose w to w_t { d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0]; d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1]; d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2]; d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3]; d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; 
d4[3] = w3[4]; d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5]; } // Y = A_T * w_t for (int n = 0; n < 4; n++) { o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n]; o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n]; o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n]; o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n]; } // save to top blob tm for (int n = 0; n < 4; n++) { outRow0[n] = o0[n] + bias0; outRow1[n] = o1[n] + bias0; outRow2[n] = o2[n] + bias0; outRow3[n] = o3[n] + bias0; } out_tile += 36; outRow0 += 4; outRow1 += 4; outRow2 += 4; outRow3 += 4; } outRow0 += outw_align * 3; outRow1 += outw_align * 3; outRow2 += outw_align * 3; outRow3 += outw_align * 3; } } } // END transform output if (outw_align != outw || outh_align != outw) { delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0); } } void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch) { float* kernel_tm = ( float* )sys_malloc((unsigned long)6 * 6 * inch * outch * sizeof(float)); // G const float ktm[6][3] = { {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6}, {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}}; #pragma omp parallel for for (int p = 0; p < outch; p++) { for (int q = 0; q < inch; q++) { const float* kernel0 = kernel + p * inch * 9 + q * 9; float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36; // transform kernel const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; // h float tmp[6][3] = {0}; for (int i = 0; i < 6; i++) { tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2]; tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2]; tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2]; } // U for (int j = 0; j < 6; j++) { float* tmpp = &tmp[j][0]; for (int i = 0; i < 6; i++) { kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * 
ktm[i][1] + tmpp[2] * ktm[i][2]; } } } } float* kernel_tm_test = kernel_wino; for (int r = 0; r < 9; r++) { int p = 0; for (; p + 7 < outch; p += 8) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36; const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36; const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36; const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36; float* ktmp = kernel_tm_test + p / 8 * inch * 32; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp[16] = kernel4[r * 4 + 0]; ktmp[17] = kernel4[r * 4 + 1]; ktmp[18] = kernel4[r * 4 + 2]; ktmp[19] = kernel4[r * 4 + 3]; ktmp[20] = kernel5[r * 4 + 0]; ktmp[21] = kernel5[r * 4 + 1]; ktmp[22] = kernel5[r * 4 + 2]; ktmp[23] = kernel5[r * 4 + 3]; ktmp[24] = kernel6[r * 4 + 0]; ktmp[25] = kernel6[r * 4 + 1]; ktmp[26] = kernel6[r * 4 + 2]; ktmp[27] = kernel6[r * 4 + 3]; ktmp[28] = kernel7[r * 4 + 0]; ktmp[29] = kernel7[r * 4 + 1]; ktmp[30] = kernel7[r * 4 + 2]; ktmp[31] = kernel7[r * 4 + 3]; ktmp += 32; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; kernel4 += 36; kernel5 += 36; kernel6 += 36; kernel7 += 36; } } for (; p + 3 < outch; p += 4) { const float* kernel0 = ( const float* 
)kernel_tm + p * inch * 36; const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36; const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36; const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp[4] = kernel1[r * 4 + 0]; ktmp[5] = kernel1[r * 4 + 1]; ktmp[6] = kernel1[r * 4 + 2]; ktmp[7] = kernel1[r * 4 + 3]; ktmp[8] = kernel2[r * 4 + 0]; ktmp[9] = kernel2[r * 4 + 1]; ktmp[10] = kernel2[r * 4 + 2]; ktmp[11] = kernel2[r * 4 + 3]; ktmp[12] = kernel3[r * 4 + 0]; ktmp[13] = kernel3[r * 4 + 1]; ktmp[14] = kernel3[r * 4 + 2]; ktmp[15] = kernel3[r * 4 + 3]; ktmp += 16; kernel0 += 36; kernel1 += 36; kernel2 += 36; kernel3 += 36; } } for (; p < outch; p++) { const float* kernel0 = ( const float* )kernel_tm + p * inch * 36; float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4; for (int q = 0; q < inch; q++) { ktmp[0] = kernel0[r * 4 + 0]; ktmp[1] = kernel0[r * 4 + 1]; ktmp[2] = kernel0[r * 4 + 2]; ktmp[3] = kernel0[r * 4 + 3]; ktmp += 4; kernel0 += 36; } } kernel_tm_test += 4 * inch * outch; } free(kernel_tm); } int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param) { int batch = input_tensor->dims[0]; int input_c = input_tensor->dims[1]; int input_h = input_tensor->dims[2]; int input_w = input_tensor->dims[3]; int output_c = output_tensor->dims[1]; int output_h = output_tensor->dims[2]; int output_w = output_tensor->dims[3]; int pad_h = param->pad_h0; int pad_w = param->pad_w0; float* kernel = ( float* )filter_tensor->data; if (!priv_info->external_interleave_mem) { int mem_size = get_private_mem_size(filter_tensor, param); void* mem = sys_malloc(mem_size); 
priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } int block_h = (output_h + TILE - 1) / TILE; int block_w = (output_w + TILE - 1) / TILE; int block = block_h * block_w; int padded_inh = TILE * block_h + 2; int padded_inw = TILE * block_w + 2; int pad_inhw = padded_inh * padded_inw; int outw = block_w * TILE; int outh = block_h * TILE; priv_info->input_pad = ( float* )sys_malloc((unsigned long)batch * input_c * pad_inhw * sizeof(float)); memset(priv_info->input_pad, 0, (unsigned long)batch * input_c * pad_inhw * sizeof(float)); priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * output_c * sizeof(float)); priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * input_c * sizeof(float)); priv_info->output_bordered = NULL; if (outw != output_w || outh != output_h) { priv_info->output_bordered = ( float* )sys_malloc((unsigned long)outw * outh * output_c * sizeof(float)); } conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c); return 0; } int wino_conv_hcl_postrun(struct conv_priv_info* priv_info) { if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL) { sys_free(priv_info->interleave_buffer); priv_info->interleave_buffer = NULL; } if (priv_info->input_pad) { sys_free(priv_info->input_pad); priv_info->input_pad = NULL; } if (priv_info->dot_block) { sys_free(priv_info->dot_block); priv_info->dot_block = NULL; } if (priv_info->transform_input) { sys_free(priv_info->transform_input); priv_info->transform_input = NULL; } if (priv_info->output_bordered) { sys_free(priv_info->output_bordered); priv_info->output_bordered = NULL; } return 0; } int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param, int num_thread, int cpu_affinity) { /* param */ int 
kernel_h = param->kernel_h; int kernel_w = param->kernel_w; int stride_h = param->stride_h; int stride_w = param->stride_w; int dilation_h = param->dilation_h; int dilation_w = param->dilation_w; int pad_h0 = param->pad_h0; int pad_w0 = param->pad_w0; int act_type = param->activation; int group = param->group; int batch = input_tensor->dims[0]; int in_c = input_tensor->dims[1]; int in_c_g = input_tensor->dims[1] / group; int in_h = input_tensor->dims[2]; int in_w = input_tensor->dims[3]; int input_size = in_c * in_h * in_w; int input_size_g = in_c_g * in_h * in_w; int kernel_size = in_c * kernel_h * kernel_w; int out_c = output_tensor->dims[1]; int out_h = output_tensor->dims[2]; int out_w = output_tensor->dims[3]; int out_hw = out_h * out_w; int output_size = out_c * out_h * out_w; int out_c_align = ((out_c + 3) & -4); /* wino param */ int block_h = (out_h + TILE - 1) / TILE; int block_w = (out_w + TILE - 1) / TILE; int block_hw = block_h * block_w; int padded_in_h = block_h * TILE + 2; int padded_in_w = block_w * TILE + 2; int padded_in_hw = padded_in_h * padded_in_w; /* buffer addr */ float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* biases = NULL; if (bias_tensor != NULL) biases = ( float* )bias_tensor->data; for (int i = 0; i < batch; i++) { for (int g = 0; g < group; g++) { pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w, in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0); conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g, output + i * out_c * out_h * out_w, priv_info->interleave_buffer, priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered, biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread); } } if (act_type >= 0) { relu(output, batch * output_size, act_type); } return 0; }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_GENERAL_MATRIX_MATRIX_H #define EIGEN_GENERAL_MATRIX_MATRIX_H namespace Eigen { namespace internal { template<typename _LhsScalar, typename _RhsScalar> class level3_blocking; /* Specialization for a row-major destination matrix => simple transposition of the product */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static EIGEN_STRONG_INLINE void run( Index rows, Index cols, Index depth, const LhsScalar* lhs, Index lhsStride, const RhsScalar* rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<RhsScalar,LhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { // transpose the product such that the result is column major general_matrix_matrix_product<Index, RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs, LhsScalar, LhsStorageOrder==RowMajor ? 
ColMajor : RowMajor, ConjugateLhs, ColMajor> ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info); } }; /* Specialization for a col-major destination matrix * => Blocking algorithm following Goto's paper */ template< typename Index, typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs, typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs> struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor> { typedef typename scalar_product_traits<LhsScalar, RhsScalar>::ReturnType ResScalar; static void run(Index rows, Index cols, Index depth, const LhsScalar* _lhs, Index lhsStride, const RhsScalar* _rhs, Index rhsStride, ResScalar* res, Index resStride, ResScalar alpha, level3_blocking<LhsScalar,RhsScalar>& blocking, GemmParallelInfo<Index>* info = 0) { const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> lhs(_lhs,lhsStride); const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> rhs(_rhs,rhsStride); typedef gebp_traits<LhsScalar,RhsScalar> Traits; Index kc = blocking.kc(); // cache block size along the K direction Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction //Index nc = blocking.nc(); // cache block size along the N direction gemm_pack_lhs<LhsScalar, Index, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs; gemm_pack_rhs<RhsScalar, Index, Traits::nr, RhsStorageOrder> pack_rhs; gebp_kernel<LhsScalar, RhsScalar, Index, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp; #ifdef EIGEN_HAS_OPENMP if(info) { // this is the parallel version! 
Index tid = omp_get_thread_num(); Index threads = omp_get_num_threads(); std::size_t sizeA = kc*mc; std::size_t sizeW = kc*Traits::WorkSpaceFactor; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, 0); ei_declare_aligned_stack_constructed_variable(RhsScalar, w, sizeW, 0); RhsScalar* blockB = blocking.blockB(); eigen_internal_assert(blockB!=0); // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs... for(Index k=0; k<depth; k+=kc) { const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A' // In order to reduce the chance that a thread has to wait for the other, // let's start by packing A'. pack_lhs(blockA, &lhs(0,k), lhsStride, actual_kc, mc); // Pack B_k to B' in a parallel fashion: // each thread packs the sub block B_k,j to B'_j where j is the thread id. // However, before copying to B'_j, we have to make sure that no other thread is still using it, // i.e., we test that info[tid].users equals 0. // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it. while(info[tid].users!=0) {} info[tid].users += threads; pack_rhs(blockB+info[tid].rhs_start*actual_kc, &rhs(k,info[tid].rhs_start), rhsStride, actual_kc, info[tid].rhs_length); // Notify the other threads that the part B'_j is ready to go. info[tid].sync = k; // Computes C_i += A' * B' per B'_j for(Index shift=0; shift<threads; ++shift) { Index j = (tid+shift)%threads; // At this point we have to make sure that B'_j has been updated by the thread j, // we use testAndSetOrdered to mimic a volatile access. // However, no need to wait for the B' part which has been updated by the current thread! 
if(shift>0) while(info[j].sync!=k) {} gebp(res+info[j].rhs_start*resStride, resStride, blockA, blockB+info[j].rhs_start*actual_kc, mc, actual_kc, info[j].rhs_length, alpha, -1,-1,0,0, w); } // Then keep going as usual with the remaining A' for(Index i=mc; i<rows; i+=mc) { const Index actual_mc = (std::min)(i+mc,rows)-i; // pack A_i,k to A' pack_lhs(blockA, &lhs(i,k), lhsStride, actual_kc, actual_mc); // C_i += A' * B' gebp(res+i, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1,-1,0,0, w); } // Release all the sub blocks B'_j of B' for the current thread, // i.e., we simply decrement the number of users by 1 for(Index j=0; j<threads; ++j) #pragma omp atomic --(info[j].users); } } else #endif // EIGEN_HAS_OPENMP { EIGEN_UNUSED_VARIABLE(info); // this is the sequential version! std::size_t sizeA = kc*mc; std::size_t sizeB = kc*cols; std::size_t sizeW = kc*Traits::WorkSpaceFactor; ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB()); ei_declare_aligned_stack_constructed_variable(RhsScalar, blockW, sizeW, blocking.blockW()); // For each horizontal panel of the rhs, and corresponding panel of the lhs... // (==GEMM_VAR1) for(Index k2=0; k2<depth; k2+=kc) { const Index actual_kc = (std::min)(k2+kc,depth)-k2; // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs. // => Pack rhs's panel into a sequential chunk of memory (L2 caching) // Note that this panel will be read as many times as the number of blocks in the lhs's // vertical panel which is, in practice, a very low number. pack_rhs(blockB, &rhs(k2,0), rhsStride, actual_kc, cols); // For each mc x kc block of the lhs's vertical panel... 
// (==GEPP_VAR1) for(Index i2=0; i2<rows; i2+=mc) { const Index actual_mc = (std::min)(i2+mc,rows)-i2; // We pack the lhs's block into a sequential chunk of memory (L1 caching) // Note that this block will be read a very high number of times, which is equal to the number of // micro vertical panel of the large rhs's panel (e.g., cols/4 times). pack_lhs(blockA, &lhs(i2,k2), lhsStride, actual_kc, actual_mc); // Everything is packed, we can now call the block * panel kernel: gebp(res+i2, resStride, blockA, blockB, actual_mc, actual_kc, cols, alpha, -1, -1, 0, 0, blockW); } } } } }; /********************************************************************************* * Specialization of GeneralProduct<> for "large" GEMM, i.e., * implementation of the high level wrapper to general_matrix_matrix_product **********************************************************************************/ template<typename Lhs, typename Rhs> struct traits<GeneralProduct<Lhs,Rhs,GemmProduct> > : traits<ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> > {}; template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType> struct gemm_functor { gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking) : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking) {} void initParallelSession() const { m_blocking.allocateB(); } void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const { if(cols==-1) cols = m_rhs.cols(); Gemm::run(rows, cols, m_lhs.cols(), /*(const Scalar*)*/&m_lhs.coeffRef(row,0), m_lhs.outerStride(), /*(const Scalar*)*/&m_rhs.coeffRef(0,col), m_rhs.outerStride(), (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(), m_actualAlpha, m_blocking, info); } protected: const Lhs& m_lhs; const Rhs& m_rhs; Dest& m_dest; Scalar m_actualAlpha; BlockingType& m_blocking; }; template<int 
StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1, bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space; template<typename _LhsScalar, typename _RhsScalar> class level3_blocking { typedef _LhsScalar LhsScalar; typedef _RhsScalar RhsScalar; protected: LhsScalar* m_blockA; RhsScalar* m_blockB; RhsScalar* m_blockW; DenseIndex m_mc; DenseIndex m_nc; DenseIndex m_kc; public: level3_blocking() : m_blockA(0), m_blockB(0), m_blockW(0), m_mc(0), m_nc(0), m_kc(0) {} inline DenseIndex mc() const { return m_mc; } inline DenseIndex nc() const { return m_nc; } inline DenseIndex kc() const { return m_kc; } inline LhsScalar* blockA() { return m_blockA; } inline RhsScalar* blockB() { return m_blockB; } inline RhsScalar* blockW() { return m_blockW; } }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor, ActualRows = Transpose ? MaxCols : MaxRows, ActualCols = Transpose ? 
MaxRows : MaxCols }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; enum { SizeA = ActualRows * MaxDepth, SizeB = ActualCols * MaxDepth, SizeW = MaxDepth * Traits::WorkSpaceFactor }; EIGEN_ALIGN16 LhsScalar m_staticA[SizeA]; EIGEN_ALIGN16 RhsScalar m_staticB[SizeB]; EIGEN_ALIGN16 RhsScalar m_staticW[SizeW]; public: gemm_blocking_space(DenseIndex /*rows*/, DenseIndex /*cols*/, DenseIndex /*depth*/) { this->m_mc = ActualRows; this->m_nc = ActualCols; this->m_kc = MaxDepth; this->m_blockA = m_staticA; this->m_blockB = m_staticB; this->m_blockW = m_staticW; } inline void allocateA() {} inline void allocateB() {} inline void allocateW() {} inline void allocateAll() {} }; template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor> class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false> : public level3_blocking< typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type, typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type> { enum { Transpose = StorageOrder==RowMajor }; typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar; typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar; typedef gebp_traits<LhsScalar,RhsScalar> Traits; DenseIndex m_sizeA; DenseIndex m_sizeB; DenseIndex m_sizeW; public: gemm_blocking_space(DenseIndex rows, DenseIndex cols, DenseIndex depth) { this->m_mc = Transpose ? cols : rows; this->m_nc = Transpose ? 
rows : cols; this->m_kc = depth; computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc); m_sizeA = this->m_mc * this->m_kc; m_sizeB = this->m_kc * this->m_nc; m_sizeW = this->m_kc*Traits::WorkSpaceFactor; } void allocateA() { if(this->m_blockA==0) this->m_blockA = aligned_new<LhsScalar>(m_sizeA); } void allocateB() { if(this->m_blockB==0) this->m_blockB = aligned_new<RhsScalar>(m_sizeB); } void allocateW() { if(this->m_blockW==0) this->m_blockW = aligned_new<RhsScalar>(m_sizeW); } void allocateAll() { allocateA(); allocateB(); allocateW(); } ~gemm_blocking_space() { aligned_delete(this->m_blockA, m_sizeA); aligned_delete(this->m_blockB, m_sizeB); aligned_delete(this->m_blockW, m_sizeW); } }; } // end namespace internal template<typename Lhs, typename Rhs> class GeneralProduct<Lhs, Rhs, GemmProduct> : public ProductBase<GeneralProduct<Lhs,Rhs,GemmProduct>, Lhs, Rhs> { enum { MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime) }; public: EIGEN_PRODUCT_PUBLIC_INTERFACE(GeneralProduct) typedef typename Lhs::Scalar LhsScalar; typedef typename Rhs::Scalar RhsScalar; typedef Scalar ResScalar; GeneralProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) { typedef internal::scalar_product_op<LhsScalar,RhsScalar> BinOp; EIGEN_CHECK_BINARY_COMPATIBILIY(BinOp,LhsScalar,RhsScalar); } template<typename Dest> void scaleAndAddTo(Dest& dst, const Scalar& alpha) const { eigen_assert(dst.rows()==m_lhs.rows() && dst.cols()==m_rhs.cols()); typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(m_lhs); typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(m_rhs); Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs) * RhsBlasTraits::extractScalarFactor(m_rhs); typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? 
RowMajor : ColMajor,LhsScalar,RhsScalar, Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType; typedef internal::gemm_functor< Scalar, Index, internal::general_matrix_matrix_product< Index, LhsScalar, (_ActualLhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate), RhsScalar, (_ActualRhsType::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate), (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>, _ActualLhsType, _ActualRhsType, Dest, BlockingType> GemmFunctor; BlockingType blocking(dst.rows(), dst.cols(), lhs.cols()); internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>(GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), this->rows(), this->cols(), Dest::Flags&RowMajorBit); } }; } // end namespace Eigen #endif // EIGEN_GENERAL_MATRIX_MATRIX_H
1.c
/*
 * Exercise: write a program where every thread prints its own identifier,
 * the total number of threads, and the string "Hello World".  Run it with
 * 8 threads.  Is the output always identical?  Why?
 *
 * Answer demonstrated here: the order of the printf lines is
 * nondeterministic, because the 8 threads run concurrently and the OS
 * scheduler interleaves them differently on each run.
 */
#include <stdio.h>
#include <omp.h>

int main(int argc, char *argv[])
{
    /* Disable dynamic adjustment of the team size so that exactly the
       requested number of threads (8) is used. */
    omp_set_dynamic(0);
    omp_set_num_threads(8);

#pragma omp parallel
    {
        /* Each thread in the team executes this block once.
           (The message text is intentionally in Russian: "This is thread
           number %d, there are %d of us in total".) */
        printf("Hello World! Это тред номер %d, всего таких нас %d\n",
               omp_get_thread_num(), omp_get_num_threads());
    }
}
in_parallel.c
#include <stdio.h>
#include <omp.h>

/* Demonstrates omp_in_parallel(): it returns 0 (false) when called from
   the sequential part of the program and nonzero (true) when called from
   inside an active parallel region. */
int main( )
{
    omp_set_num_threads(4);

    /* Outside any parallel region: prints 0. */
    printf("%d\n", omp_in_parallel( ));

    /* Inside a parallel region; the master construct restricts the print
       to thread 0 so the line appears exactly once: prints 1. */
#pragma omp parallel
#pragma omp master
    {
        printf("%d\n", omp_in_parallel( ));
    }
}
functions.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "functions.h" //compute a*b mod p safely unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) { unsigned int za = a; unsigned int ab = 0; while (b > 0) { if (b%2 == 1) ab = (ab + za) % p; za = (2 * za) % p; b /= 2; } return ab; } //compute a^b mod p safely unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) { unsigned int z = a; unsigned int aExpb = 1; while (b > 0) { if (b%2 == 1) aExpb = modprod(aExpb, z, p); z = modprod(z, z, p); b /= 2; } return aExpb; } //returns either 0 or 1 randomly unsigned int randomBit() { return rand()%2; } //returns a random integer which is between 2^{n-1} and 2^{n} unsigned int randXbitInt(unsigned int n) { unsigned int r = 1; for (unsigned int i=0; i<n-1; i++) { r = r*2 + randomBit(); } return r; } //tests for primality and return 1 if N is probably prime and 0 if N is composite unsigned int isProbablyPrime(unsigned int N) { if (N%2==0) return 0; //not interested in even numbers (including 2) unsigned int NsmallPrimes = 168; unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997}; //before using a probablistic primality check, check directly 
using the small primes list for (unsigned int n=1;n<NsmallPrimes;n++) { if (N==smallPrimeList[n]) return 1; //true if (N%smallPrimeList[n]==0) return 0; //false } //if we're testing a large number switch to Miller-Rabin primality test unsigned int r = 0; unsigned int d = N-1; while (d%2 == 0) { d /= 2; r += 1; } for (unsigned int n=0;n<NsmallPrimes;n++) { unsigned int k = smallPrimeList[n]; unsigned int x = modExp(k,d,N); if ((x==1) || (x==N-1)) continue; for (unsigned int i=1;i<r-1;i++) { x = modprod(x,x,N); if (x == 1) return 0; //false if (x == N-1) break; } // see whether we left the loop becasue x==N-1 if (x == N-1) continue; return 0; //false } return 1; //true } //Finds a generator of Z_p using the assumption that p=2*q+1 unsigned int findGenerator(unsigned int p) { unsigned int g; unsigned int q = (p-1)/2; do { //make a random number 1<= g < p g = randXbitInt(32)%p; //could also have passed n to findGenerator } while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1)); return g; } void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g, unsigned int *h, unsigned int *x) { /* Use isProbablyPrime and randomXbitInt to find a new random n-bit prime number which satisfies p=2*q+1 where q is also prime */ unsigned int q; do { *p = randXbitInt(n); q = (*p-1)/2; } while (!isProbablyPrime(*p) || !isProbablyPrime(q)); /* Use the fact that p=2*q+1 to quickly find a generator */ *g = findGenerator(*p); //pick a secret key, x *x = randXbitInt(n)%(*p); //compute h *h = modExp(*g,*x,*p); printf("ElGamal Setup successful.\n"); printf("p = %u. 
\n", *p); printf("g = %u is a generator of Z_%u \n", *g, *p); printf("Secret key: x = %u \n", *x); printf("h = g^x = %u\n", *h); printf("\n"); } void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int g, unsigned int h) { /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //pick y in Z_p randomly unsigned int y; do { y = randXbitInt(32)%p; } while (y==0); //dont allow y=0 //compute a = g^y a[i] = modExp(g,y,p); //compute s = h^y unsigned int s = modExp(h,y,p); //encrypt m by multiplying with s m[i] = modprod(m[i],s,p); } } void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints, unsigned int p, unsigned int x) { /* Q2.2 Parallelize this function with OpenMP */ #pragma omp parallel for for (unsigned int i=0; i<Nints;i++) { //compute s = a^x unsigned int s = modExp(a[i],x,p); //compute s^{-1} = s^{p-2} unsigned int invS = modExp(s,p-2,p); //decrypt message by multplying by invS m[i] = modprod(m[i],invS,p); } } //Pad the end of string so its length is divisible by Nchars // Assume there is enough allocated storage for the padded string void padString(unsigned char* string, unsigned int charsPerInt) { /* Q1.2 Complete this function */ while (strlen(string) % charsPerInt != 0) { strcat(string, " "); } } void convertStringToZ(unsigned char *string, unsigned int Nchars, unsigned int *Z, unsigned int Nints) { /* Q1.3 Complete this function */ /* Q2.1 Parallelize this function with OpenMP */ if (Nchars == Nints) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { Z[i] = (unsigned int) string[i]; } } if (Nchars / Nints == 2) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { Z[i] = 256 * (unsigned int) string[2 * i] + (unsigned int) string[2 * i + 1]; } } else { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { Z[i] = 65536 * (unsigned int) string[3 * i] + 256 * (unsigned int) string[3 * i + 1] + 
(unsigned int) string[3 * i + 2]; } } } void convertZToString(unsigned int *Z, unsigned int Nints, unsigned char *string, unsigned int Nchars) { /* Q1.4 Complete this function */ /* Q2.1 Parallelize this function with OpenMP */ if (Nchars == Nints) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { string[i] = (unsigned char) Z[i]; } } if (Nchars / Nints == 2) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { string[2 * i] = (unsigned char) (Z[i] / 256); string[2 * i + 1] = (unsigned char) (Z[i] % 256); } } else { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { string[3 * i] = (unsigned char) (Z[i] / 65536); string[3 * i + 1] = (unsigned char) (Z[i] / 256 % 256); string[3 * i + 2] = (unsigned char) (Z[i] % 256); } } } /* Q.Bonus */ void convertEncryptToString(unsigned int *Z, unsigned int *a, unsigned int Nints, unsigned int charsPerInt, unsigned char *S) { if (charsPerInt == 1) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { S[4 * i] = (unsigned char) (Z[i] / 256); S[4 * i + 1] = (unsigned char) (Z[i] % 256); S[4 * i + 2] = (unsigned char) (a[i] / 256); S[4 * i + 3] = (unsigned char) (a[i] % 256); } } else if (charsPerInt == 2) { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { S[6 * i] = (unsigned char) (Z[i] / 65536); S[6 * i + 1] = (unsigned char) (Z[i] / 256 % 256); S[6 * i + 2] = (unsigned char) (Z[i] % 256); S[6 * i + 3] = (unsigned char) (a[i] / 65536); S[6 * i + 4] = (unsigned char) (a[i] / 256 % 256); S[6 * i + 5] = (unsigned char) (a[i] % 256); } } else { #pragma omp parallel for for (unsigned int i = 0; i < Nints; i++) { S[8 * i] = (unsigned char) (Z[i] / 16777216); S[8 * i + 1] = (unsigned char) (Z[i] % 16777216 / 65536); S[8 * i + 2] = (unsigned char) (Z[i] % 65536 / 256); S[8 * i + 3] = (unsigned char) (Z[i] % 256); S[8 * i + 4] = (unsigned char) (a[i] / 16777216); S[8 * i + 5] = (unsigned char) (a[i] % 16777216 / 65536); S[8 * i + 6] = (unsigned char) 
(a[i] % 65536 / 256); S[8 * i + 7] = (unsigned char) (a[i] % 256); } } }
stribog_fmt_plug.c
/* * GOST R 34.11-2012 cracker patch for JtR. Hacked together during * the Hash Runner 2015 contest by Dhiru Kholia and Aleksey Cherepanov. * * Based on https://www.streebog.net/ and https://github.com/sjinks/php-stribog * code. See "LICENSE.gost" for licensing details of the original code. */ #include "arch.h" #if __SSE4_1__ #if FMT_EXTERNS_H extern struct fmt_main fmt_stribog_256; extern struct fmt_main fmt_stribog_512; #elif FMT_REGISTERS_H john_register_one(&fmt_stribog_256); john_register_one(&fmt_stribog_512); #else #include <string.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "gost3411-2012-sse41.h" #ifdef _OPENMP #include <omp.h> #ifndef OMP_SCALE #define OMP_SCALE 512 // XXX #endif #endif #include "memdbg.h" #define FORMAT_LABEL "stribog" #define FORMAT_NAME "" #define TAG256 "$stribog256$" #define TAG256_LENGTH (sizeof(TAG256)-1) #define TAG512 "$stribog512$" #define TAG512_LENGTH (sizeof(TAG512)-1) #define TAG_LENGTH TAG256_LENGTH #define FORMAT_TAG TAG256 #define ALGORITHM_NAME "GOST R 34.11-2012 128/128 SSE4.1 1x" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 64 - 1 #define CIPHERTEXT256_LENGTH 64 #define CIPHERTEXT512_LENGTH 128 #define CIPHERTEXT_LENGTH CIPHERTEXT256_LENGTH #define BINARY_SIZE_256 32 #define BINARY_SIZE_512 64 #define SALT_SIZE 0 #define SALT_ALIGN 1 #define BINARY_ALIGN sizeof(uint32_t) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 static struct fmt_tests stribog_256_tests[] = { {"$stribog256$bbe19c8d2025d99f943a932a0b365a822aa36a4c479d22cc02c8973e219a533f", ""}, /* {"3f539a213e97c802cc229d474c6aa32a825a360b2a933a949fd925208d9ce1bb", ""}, */ /* 9d151eefd8590b89daa6ba6cb74af9275dd051026bb149a452fd84e5e57b5500 */ {"$stribog256$00557be5e584fd52a449b16b0251d05d27f94ab76cbaa6da890b59d8ef1e159d", "012345678901234567890123456789012345678901234567890123456789012"}, {NULL} 
}; static struct fmt_tests stribog_512_tests[] = { /* 8e945da209aa869f0455928529bcae4679e9873ab707b55315f56ceb98bef0a7362f715528356ee83cda5f2aac4c6ad2ba3a715c1bcd81cb8e9f90bf4c1c1a8a */ {"$stribog512$8a1a1c4cbf909f8ecb81cd1b5c713abad26a4cac2a5fda3ce86e352855712f36a7f0be98eb6cf51553b507b73a87e97946aebc29859255049f86aa09a25d948e", ""}, /* 1b54d01a4af5b9d5cc3d86d68d285462b19abc2475222f35c085122be4ba1ffa00ad30f8767b3a82384c6574f024c311e2a481332b08ef7f41797891c1646f48 */ {"$stribog512$486f64c1917879417fef082b3381a4e211c324f074654c38823a7b76f830ad00fa1fbae42b1285c0352f227524bc9ab16254288dd6863dccd5b9f54a1ad0541b", "012345678901234567890123456789012345678901234567890123456789012"}, {NULL} }; #define make_full_static_buf(type, var, len) static type (var)[(len)] #define make_dynamic_static_buf(type, var, len) \ static type *var; \ if (!var) \ var = mem_alloc_tiny((len), MEM_ALIGN_WORD) #if 1 #define make_static_buf make_dynamic_static_buf #else #define make_static_buf make_full_static_buf #endif static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_out)[BINARY_SIZE_512 / sizeof(uint32_t)]; static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif if (!saved_key) { saved_key = mem_calloc_align(self->params.max_keys_per_crypt, sizeof(*saved_key), MEM_ALIGN_SIMD); } if (!crypt_out) crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out)); } static void done(void) { MEM_FREE(crypt_out); MEM_FREE(saved_key); } static char *split_256(char *ciphertext, int index, struct fmt_main *self) { make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpylwr(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); return out; } static int valid_256(char *ciphertext, struct fmt_main *self) { 
char *p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; /* else */ /* return 0; */ if (strlen(p) != CIPHERTEXT_LENGTH) return 0; while(*p) if (atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static void *get_binary_256(char *ciphertext) { static unsigned char *out; char *p = ciphertext; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE_256, MEM_ALIGN_WORD); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = ciphertext + TAG_LENGTH; for (i = 0; i < BINARY_SIZE_256; i++) { out[BINARY_SIZE_256 - i - 1] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef TAG_LENGTH #undef FORMAT_TAG #undef CIPHERTEXT_LENGTH #define TAG_LENGTH TAG512_LENGTH #define FORMAT_TAG TAG512 #define CIPHERTEXT_LENGTH CIPHERTEXT512_LENGTH static char *split_512(char *ciphertext, int index, struct fmt_main *self) { make_static_buf(char, out, TAG_LENGTH + CIPHERTEXT_LENGTH + 1); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) ciphertext += TAG_LENGTH; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpylwr(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); return out; } static int valid_512(char *ciphertext, struct fmt_main *self) { char *p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; /* else */ /* return 0; */ if (strlen(p) != CIPHERTEXT_LENGTH) return 0; while(*p) if (atoi16[ARCH_INDEX(*p++)]==0x7f) return 0; return 1; } static void *get_binary_512(char *ciphertext) { static unsigned char *out; char *p = ciphertext; int i; if (!out) out = mem_alloc_tiny(BINARY_SIZE_512, MEM_ALIGN_WORD); if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) p = ciphertext + TAG_LENGTH; for (i = 0; i < BINARY_SIZE_512; i++) { out[BINARY_SIZE_512 - i - 1] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #undef TAG_LENGTH #undef FORMAT_TAG #undef CIPHERTEXT_LENGTH /* static int valid_256(char *ciphertext, struct fmt_main *self) */ /* { */ /* return valid(ciphertext, self, 64); */ /* } */ /* 
static int valid_512(char *ciphertext, struct fmt_main *self) */ /* { */ /* return valid(ciphertext, self, 128); */ /* } */ static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; } static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; } static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; } static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; } static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; } static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; } static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; } static void stribog256_init(void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Init(ctx, 256); } static void stribog512_init(void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Init(ctx, 512); } static void stribog_update(void* context, const unsigned char* buf, unsigned int count) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; offset = (((size_t)buf + 15) & ~0x0F) - (size_t)buf; if (!offset) { GOST34112012Update(ctx, buf, count); } else { ALIGN(16) unsigned char tmp[15]; assert(offset < 16); memcpy(tmp, buf, offset); GOST34112012Update(ctx, tmp, offset); GOST34112012Update(ctx, buf + offset, count - offset); } } static void stribog_final(unsigned char* digest, void* context) { size_t offset = (((size_t)context + 15) & ~0x0F) - (size_t)context; void *ctx = (char*)context + offset; GOST34112012Final(ctx, digest); } static int crypt_256(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for for (index = 0; index < count; index++) #endif { /* GOST34112012Context ctx; GOST34112012Init(&ctx, 256); GOST34112012Update(&ctx, (const 
unsigned char*)saved_key[index], strlen(saved_key[index])); GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]); */
		/* Over-allocated so the stribog*_init() wrapper can 16-byte-align
		 * the state inside (SSE implementation requires alignment). */
		GOST34112012Context ctx[2]; // alignment stuff
		stribog256_init((void *)ctx);
		stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		stribog_final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Hash all queued candidate keys with Streebog-512.
 * NOTE(review): without _OPENMP the for-loop disappears and the block runs
 * once with index 0 — presumably MAX_KEYS_PER_CRYPT is 1 in non-OMP builds;
 * confirm against the definitions earlier in this file. */
static int crypt_512(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
		/* GOST34112012Context ctx; GOST34112012Init(&ctx, 512); GOST34112012Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index])); GOST34112012Final(&ctx, (unsigned char*)crypt_out[index]); */
		/* Over-allocated for 16-byte alignment, as in crypt_256(). */
		GOST34112012Context ctx[2]; // alignment stuff
		stribog512_init((void *)ctx);
		stribog_update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
		stribog_final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

/* Quick check: does 'binary' match any computed digest (first ARCH_SIZE
 * bytes only)?  NOTE(review): without _OPENMP only index 0 is compared —
 * relies on count being 1 in non-OMP builds; verify. */
static int cmp_all(void *binary, int count)
{
	int index = 0;

#ifdef _OPENMP
	for (; index < count; index++)
#endif
	if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
		return 1;
	return 0;
}

/* Full-length comparison for a single candidate. */
static int cmp_one_256(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_256);
}

static int cmp_one_512(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_512);
}

/* cmp_one already compares the full binary, so nothing more to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate plaintext (truncated to the saved_key slot size). */
static void stribog_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration: wires the callbacks above into John's format table. */
struct fmt_main fmt_stribog_256 = {
	{
		"Stribog-256",
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_256,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ TAG256 },
		stribog_256_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_256,
		split_256,
		get_binary_256,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		stribog_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_256,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_256,
		cmp_exact
	}
};

/* Same registration for the 512-bit variant. */
struct fmt_main fmt_stribog_512 = {
	{
		"Stribog-512",
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE_512,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ TAG512 },
		stribog_512_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid_512,
		split_512,
		get_binary_512,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		stribog_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_512,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one_512,
		cmp_exact
	}
};

#endif /* plugin stanza */

#else

#if !defined(FMT_EXTERNS_H) && !defined(FMT_REGISTERS_H)
#ifdef __GNUC__
#warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled
#elif _MSC_VER
#pragma message(": warning Stribog-256 and Stribog-512 formats require SSE 4.1, formats disabled:")
#endif
#endif

#endif /* __SSE4_1__ */
SwathFile.h
// -------------------------------------------------------------------------- // OpenMS -- Open-Source Mass Spectrometry // -------------------------------------------------------------------------- // Copyright The OpenMS Team -- Eberhard Karls University Tuebingen, // ETH Zurich, and Freie Universitaet Berlin 2002-2013. // // This software is released under a three-clause BSD license: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of any author or any participating institution // may be used to endorse or promote products derived from this software // without specific prior written permission. // For a full list of authors, refer to the file AUTHORS. // -------------------------------------------------------------------------- // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING // INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // -------------------------------------------------------------------------- // $Maintainer: Hannes Roest $ // $Authors: Hannes Roest $ // -------------------------------------------------------------------------- #ifndef OPENMS_FORMAT_SWATHFILE_H #define OPENMS_FORMAT_SWATHFILE_H #include <OpenMS/KERNEL/MSExperiment.h> #include <OpenMS/FORMAT/MzMLFile.h> #include <OpenMS/FORMAT/MzXMLFile.h> #ifdef OPENMS_FORMAT_SWATHFILE_MZXMLSUPPORT #include <OpenMS/FORMAT/MzXMLFile.h> #endif #include <OpenMS/FORMAT/DATAACCESS/SwathFileConsumer.h> namespace OpenMS { /** * @brief File adapter for Swath files. * * This class can load SWATH files in different storage versions. The most * convenient file is a single MzML file which contains one experiment. * However, also the loading of a list of files is supported (loadSplit) * where it is assumed that each individual file only contains scans from one * precursor isolation window (one SWATH). Finally, experimental support for * mzXML is available but needs to be selected with a specific compile flag * (this is not for everyday use). 
* */ class OPENMS_DLLAPI SwathFile : public ProgressLogger { public: /// Loads a Swath run from a list of split mzML files std::vector<OpenSwath::SwathMap> loadSplit(StringList file_list, String tmp, boost::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions = "normal") { int progress = 0; startProgress(0, file_list.size(), "Loading data"); std::vector<OpenSwath::SwathMap> swath_maps(file_list.size()); #ifdef _OPENMP #pragma omp parallel for #endif for (SignedSize i = 0; i < boost::numeric_cast<SignedSize>(file_list.size()); ++i) { #ifdef _OPENMP #pragma omp critical (load) #endif { std::cout << "Loading file " << i << " with name " << file_list[i] << " using readoptions " << readoptions << std::endl; } String tmp_fname = "openswath_tmpfile_" + String(i) + ".mzML"; boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); OpenSwath::SpectrumAccessPtr spectra_ptr; // Populate meta-data if (i == 0) { exp_meta = populateMetaData_(file_list[i]); } if (readoptions == "normal") { MzMLFile().load(file_list[i], *exp.get()); spectra_ptr = SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(exp); } else if (readoptions == "cache") { // Cache and load the exp (metadata only) file again spectra_ptr = doCacheFile_(file_list[i], tmp, tmp_fname, exp); } else { throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Unknown option " + readoptions); } OpenSwath::SwathMap swath_map; bool ms1 = false; double upper = -1, lower = -1; if (exp->size() == 0) { std::cerr << "WARNING: File " << file_list[i] << "\n does not have any scans - I will skip it" << std::endl; continue; } if (exp->getSpectra()[0].getPrecursors().size() == 0) { std::cout << "NOTE: File " << file_list[i] << "\n does not have any precursors - I will assume it is the MS1 scan." 
<< std::endl; ms1 = true; } else { // Checks that this is really a SWATH map and extracts upper/lower window OpenSwathHelper::checkSwathMap(*exp.get(), lower, upper); } swath_map.sptr = spectra_ptr; swath_map.lower = lower; swath_map.upper = upper; swath_map.ms1 = ms1; #ifdef _OPENMP #pragma omp critical (load) #endif { LOG_DEBUG << "Adding Swath file " << file_list[i] << " with " << swath_map.lower << " to " << swath_map.upper << std::endl; swath_maps[i] = swath_map; setProgress(progress++); } } endProgress(); return swath_maps; } /// Loads a Swath run from a single mzML file std::vector<OpenSwath::SwathMap> loadMzML(String file, String tmp, boost::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions = "normal") { std::cout << "Loading mzML file " << file << " using readoptions " << readoptions << std::endl; String tmp_fname = "openswath_tmpfile"; startProgress(0, 1, "Loading metadata file " + file); boost::shared_ptr<MSExperiment<Peak1D> > experiment_metadata = populateMetaData_(file); exp_meta = experiment_metadata; // First pass through the file -> get the meta data std::cout << "Will analyze the metadata first to determine the number of SWATH windows and the window sizes." 
<< std::endl; std::vector<int> swath_counter; int nr_ms1_spectra; std::vector<OpenSwath::SwathMap> known_window_boundaries; countScansInSwath_(experiment_metadata->getSpectra(), swath_counter, nr_ms1_spectra, known_window_boundaries); std::cout << "Determined there to be " << swath_counter.size() << " SWATH windows and in total " << nr_ms1_spectra << " MS1 spectra" << std::endl; endProgress(); FullSwathFileConsumer* dataConsumer; boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); startProgress(0, 1, "Loading data file " + file); if (readoptions == "normal") { dataConsumer = new RegularSwathFileConsumer(known_window_boundaries); MzMLFile().transform(file, dataConsumer, *exp.get()); } else if (readoptions == "cache") { dataConsumer = new CachedSwathFileConsumer(known_window_boundaries, tmp, tmp_fname, nr_ms1_spectra, swath_counter); MzMLFile().transform(file, dataConsumer, *exp.get()); } else { throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Unknown or unsupported option " + readoptions); } std::vector<OpenSwath::SwathMap> swath_maps; dataConsumer->retrieveSwathMaps(swath_maps); delete dataConsumer; endProgress(); return swath_maps; } /// Loads a Swath run from a single mzXML file std::vector<OpenSwath::SwathMap> loadMzXML(String file, String tmp, boost::shared_ptr<ExperimentalSettings>& exp_meta, String readoptions = "normal") { std::cout << "Loading mzXML file " << file << " using readoptions " << readoptions << std::endl; String tmp_fname = "openswath_tmpfile"; startProgress(0, 1, "Loading metadata file " + file); boost::shared_ptr<MSExperiment<Peak1D> > experiment_metadata(new MSExperiment<Peak1D>); MzXMLFile f; f.getOptions().setAlwaysAppendData(true); f.getOptions().setFillData(false); f.load(file, *experiment_metadata); exp_meta = experiment_metadata; // First pass through the file -> get the meta data std::cout << "Will analyze the metadata first to determine the number of SWATH windows and the window sizes." 
<< std::endl; std::vector<int> swath_counter; int nr_ms1_spectra; std::vector<OpenSwath::SwathMap> known_window_boundaries; countScansInSwath_(experiment_metadata->getSpectra(), swath_counter, nr_ms1_spectra, known_window_boundaries); std::cout << "Determined there to be " << swath_counter.size() << " SWATH windows and in total " << nr_ms1_spectra << " MS1 spectra" << std::endl; endProgress(); FullSwathFileConsumer* dataConsumer; boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); startProgress(0, 1, "Loading data file " + file); if (readoptions == "normal") { dataConsumer = new RegularSwathFileConsumer(known_window_boundaries); MzXMLFile().transform(file, dataConsumer, *exp.get()); } else if (readoptions == "cache") { dataConsumer = new CachedSwathFileConsumer(known_window_boundaries, tmp, tmp_fname, nr_ms1_spectra, swath_counter); MzXMLFile().transform(file, dataConsumer, *exp.get()); } else { throw Exception::IllegalArgument(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Unknown or unsupported option " + readoptions); } std::vector<OpenSwath::SwathMap> swath_maps; dataConsumer->retrieveSwathMaps(swath_maps); delete dataConsumer; endProgress(); return swath_maps; } protected: /// Cache a file to disk OpenSwath::SpectrumAccessPtr doCacheFile_(String in, String tmp, String tmp_fname, boost::shared_ptr<MSExperiment<Peak1D> > experiment_metadata) { String cached_file = tmp + tmp_fname + ".cached"; String meta_file = tmp + tmp_fname; // Create new consumer, transform infile, write out metadata MSDataCachedConsumer* cachedConsumer = new MSDataCachedConsumer(cached_file, true); MzMLFile().transform(in, cachedConsumer, *experiment_metadata.get()); CachedmzML().writeMetadata(*experiment_metadata.get(), meta_file, true); delete cachedConsumer; // ensure that filestream gets closed boost::shared_ptr<MSExperiment<Peak1D> > exp(new MSExperiment<Peak1D>); MzMLFile().load(meta_file, *exp.get()); return SimpleOpenMSSpectraFactory::getSpectrumAccessOpenMSPtr(exp); 
} /// Only read the meta data from a file and use it to populate exp_meta boost::shared_ptr< MSExperiment<Peak1D> > populateMetaData_(String file) { boost::shared_ptr<MSExperiment<Peak1D> > experiment_metadata(new MSExperiment<Peak1D>); MzMLFile f; f.getOptions().setAlwaysAppendData(true); f.getOptions().setFillData(false); f.load(file, *experiment_metadata); return experiment_metadata; } /// Counts the number of scans in a full Swath file (e.g. concatenated non-split file) void countScansInSwath_(const std::vector<MSSpectrum<> > exp, std::vector<int>& swath_counter, int& nr_ms1_spectra, std::vector<OpenSwath::SwathMap>& known_window_boundaries) { int ms1_counter = 0; for (Size i = 0; i < exp.size(); i++) { const MSSpectrum<>& s = exp[i]; { if (s.getMSLevel() == 1) { ms1_counter++; } else { if (s.getPrecursors().empty()) { throw Exception::InvalidParameter(__FILE__, __LINE__, __PRETTY_FUNCTION__, "Swath scan needs to have a precursor set."); } const std::vector<Precursor> prec = s.getPrecursors(); double center = prec[0].getMZ(); bool found = false; for (Size j = 0; j < known_window_boundaries.size(); j++) { // We group by the precursor mz (center of the window) since this // should be present if (std::fabs(center - known_window_boundaries[j].center) < 1e-6) { found = true; swath_counter[j]++; } } if (!found) { // we found a new SWATH scan swath_counter.push_back(1); double lower = prec[0].getMZ() - prec[0].getIsolationWindowLowerOffset(); double upper = prec[0].getMZ() + prec[0].getIsolationWindowUpperOffset(); OpenSwath::SwathMap boundary; boundary.lower = lower; boundary.upper = upper; boundary.center = center; known_window_boundaries.push_back(boundary); LOG_DEBUG << "Adding Swath centered at " << center << " m/z with an isolation window of " << lower << " to " << upper << " m/z." 
<< std::endl; } } } } nr_ms1_spectra = ms1_counter; std::cout << "Determined there to be " << swath_counter.size() << " SWATH windows and in total " << nr_ms1_spectra << " MS1 spectra" << std::endl; } }; } #endif
convoaa.c
/* Copyright 2015. The Regents of the University of California. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: * 2012 Martin Uecker uecker@eecs.berkeley.edu */ #include <assert.h> #include "misc/misc.h" #include "num/multind.h" #include "num/flpmath.h" #include "num/conv.h" #include "num/vecops.h" #include "convoaa.h" void overlapandadd(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2) { long ndims[2 * N]; long L[2 * N]; long ndim2[2 * N]; long ndim3[2 * N]; for (int i = 0; i < N; i++) { assert(0 == dims[i] % blk[i]); assert(dim2[i] <= blk[i]); ndims[i * 2 + 1] = dims[i] / blk[i]; ndims[i * 2 + 0] = blk[i]; L[i * 2 + 1] = dims[i] / blk[i]; L[i * 2 + 0] = blk[i] + dim2[i] - 1; ndim2[i * 2 + 1] = 1; ndim2[i * 2 + 0] = dim2[i]; ndim3[i * 2 + 1] = dims[i] / blk[i] + 1; ndim3[i * 2 + 0] = blk[i]; } complex float* tmp = md_alloc(2 * N, L, CFL_SIZE); // conv_causal_extend(2 * N, L, tmp, ndims, src1, ndim2, src2); conv(2 * N, ~0, CONV_EXTENDED, CONV_CAUSAL, L, tmp, ndims, src1, ndim2, src2); // [------++++|||||||| //long str1[2 * N]; long str2[2 * N]; long str3[2 * N]; //md_calc_strides(2 * N, str1, ndims, 8); md_calc_strides(2 * N, str2, L, 8); md_calc_strides(2 * N, str3, ndim3, 8); md_clear(2 * N, ndim3, dst, CFL_SIZE); md_zadd2(2 * N, L, str3, dst, str3, dst, str2, tmp); md_free(tmp); } void overlapandsave(int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N], complex float* src2) { // [------++++ // [------ long ndims[2 * N]; long L[2 * N]; long ndim2[2 * N]; long ndim3[2 * N]; for (int i = 0; i < N; i++) { assert(0 == dims[i] % blk[i]); assert(dim2[i] <= blk[i]); ndims[i * 2 + 1] = dims[i] / blk[i]; ndims[i * 2 + 0] = blk[i]; L[i * 2 + 1] = dims[i] / blk[i]; L[i * 2 + 0] = blk[i] + dim2[i] - 1; ndim2[i * 2 + 1] = 1; ndim2[i * 2 + 0] = dim2[i]; ndim3[i 
* 2 + 1] = dims[i] / blk[i] - 0; ndim3[i * 2 + 0] = blk[i]; } complex float* tmp = md_alloc(2 * N, L, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; long str3[2 * N]; md_calc_strides(2 * N, str1, ndims, 8); md_calc_strides(2 * N, str2, L, 8); md_calc_strides(2 * N, str3, ndim3, 8); md_clear(2 * N, L, tmp, 8); md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8); conv(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, dst, L, tmp, ndim2, src2); md_free(tmp); } #if 0 struct conv_plan* overlapandsave_plan(int N, const long dims[N], const long blk[N], const long dim2[N], complex float* src2) { return conv_plan(2 * N, ~0, CONV_VALID, CONV_CAUSAL, ndims, L, ndim2, src2); } void overlapandsave_exec(struct conv_plan* plan, int N, const long dims[N], const long blk[N], complex float* dst, complex float* src1, const long dim2[N]) { md_clear(2 * N, L, tmp, 8); md_copy2(2 * N, ndim3, str2, tmp, str1, src1, 8); conv_exec(plan, dst, tmp); free(tmp); } #endif void overlapandsave2(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks? 
ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); md_resize_center(N, dims1B, src1B, dims1, src1, CFL_SIZE); complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, CFL_SIZE); md_calc_strides(2 * N, str2, tdims, CFL_SIZE); long off = md_calc_offset(2 * N, str1, shift); md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, CFL_SIZE); md_free(src1B); conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2); md_free(tmp); } void overlapandsave2H(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / 
blk[i] + 2; // do we need two full blocks? ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); //conv(2 * N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, dst, tdims, tmp, ndims2, src2); convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, src1, ndims2, src2); complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, CFL_SIZE); md_calc_strides(2 * N, str2, tdims, CFL_SIZE); long off = md_calc_offset(2 * N, str1, shift); md_clear(N, dims1B, src1B, CFL_SIZE); //md_copy2(2 * N, tdims, str1, ((void*)src1B) + off, str2, tmp, sizeof(complex float));// FIXME: md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp); md_resize_center(N, dims1, dst, dims1B, src1B, CFL_SIZE); md_free(src1B); md_free(tmp); } void overlapandsave2NE(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], complex float* src1, const long dims2[N], complex float* src2, const long mdims[N], complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims1[2 * N]; long ndims2[2 * N]; long shift[2 * N]; unsigned int nflags = 0; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { nflags = MD_SET(nflags, 2 * i); assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); // blocked output nodims[i * 
2 + 1] = odims[i] / blk[i]; nodims[i * 2 + 0] = blk[i]; // expanded temporary storage tdims[i * 2 + 1] = dims1[i] / blk[i]; tdims[i * 2 + 0] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + // resized input dims1B[i] = dims1[i] + 2 * blk[i]; ndims1[i * 2 + 1] = dims1[i] / blk[i] + 2; // do we need two full blocks? ndims1[i * 2 + 0] = blk[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = blk[i] - (dims2[i] - 1) / 2; // kernel ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } else { nodims[i * 2 + 1] = 1; nodims[i * 2 + 0] = odims[i]; tdims[i * 2 + 1] = 1; tdims[i * 2 + 0] = dims1[i]; ndims1[i * 2 + 1] = 1; ndims1[i * 2 + 0] = dims1[i]; shift[i * 2 + 1] = 0; shift[i * 2 + 0] = 0; dims1B[i] = dims1[i]; ndims2[i * 2 + 1] = 1; ndims2[i * 2 + 0] = dims2[i]; } } complex float* src1B = md_alloc(N, dims1B, CFL_SIZE); complex float* tmp = md_alloc(2 * N, tdims, CFL_SIZE); complex float* tmpX = md_alloc(N, odims, CFL_SIZE); long str1[2 * N]; long str2[2 * N]; md_calc_strides(2 * N, str1, ndims1, sizeof(complex float)); md_calc_strides(2 * N, str2, tdims, sizeof(complex float)); long off = md_calc_offset(2 * N, str1, shift); md_resize_center(N, dims1B, src1B, dims1, src1, sizeof(complex float)); // we can loop here md_copy2(2 * N, tdims, str2, tmp, str1, ((void*)src1B) + off, sizeof(complex float)); conv(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2); long ostr[N]; long mstr[N]; md_calc_strides(N, ostr, odims, sizeof(complex float)); md_calc_strides(N, mstr, mdims, sizeof(complex float)); md_zmul2(N, odims, ostr, tmpX, ostr, tmpX, mstr, msk); convH(2 * N, nflags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); md_clear(N, dims1B, src1B, sizeof(complex float)); md_zadd2(2 * N, tdims, str1, ((void*)src1B) + off, str1, ((void*)src1B) + off, str2, tmp); // md_resize_center(N, dims1, dst, dims1B, src1B, sizeof(complex float)); md_free(src1B); md_free(tmpX); md_free(tmp); } void 
overlapandsave2NEB(int N, unsigned int flags, const long blk[N], const long odims[N], complex float* dst, const long dims1[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims2[2 * N]; long nmdims[2 * N]; int e = N; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); assert((1 == mdims[i]) || (mdims[i] == dims1[i])); // blocked output nodims[e] = odims[i] / blk[i]; nodims[i] = blk[i]; // expanded temporary storage tdims[e] = dims1[i] / blk[i]; tdims[i] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + if (1 == mdims[i]) { nmdims[2 * i + 1] = 1; nmdims[2 * i + 1] = 1; } else { nmdims[2 * i + 1] = mdims[i] / blk[i]; nmdims[2 * i + 0] = blk[i]; } // resized input // minimal padding dims1B[i] = dims1[i] + (dims2[i] - 1); // kernel ndims2[e] = 1; ndims2[i] = dims2[i]; e++; } else { nodims[i] = odims[i]; tdims[i] = dims1[i]; nmdims[2 * i + 1] = 1; nmdims[2 * i + 0] = mdims[i]; dims1B[i] = dims1[i]; ndims2[i] = dims2[i]; } } int NE = e; //long S = md_calc_size(N, dims1B, 1); long str1[NE]; long str1B[N]; md_calc_strides(N, str1B, dims1B, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { str1[i] = str1B[i]; if (MD_IS_SET(flags, i)) str1[e++] = str1B[i] * blk[i]; } assert(NE == e); long str2[NE]; md_calc_strides(NE, str2, tdims, sizeof(complex float)); long ostr[NE]; long mstr[NE]; long mstrB[2 * N]; md_calc_strides(NE, ostr, nodims, sizeof(complex float)); md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { mstr[i] = mstrB[2 * i + 0]; if (MD_IS_SET(flags, i)) mstr[e++] = mstrB[2 * i + 1]; } assert(NE == e); 
const complex float* src1B = src1;//! //complex float* src1B = xmalloc(S * sizeof(complex float)); //md_resizec(N, dims1B, src1B, dims1, src1, sizeof(complex float)); // we can loop here assert(NE == N + 3); assert(1 == ndims2[N + 0]); assert(1 == ndims2[N + 1]); assert(1 == ndims2[N + 2]); assert(tdims[N + 0] == nodims[N + 0]); assert(tdims[N + 1] == nodims[N + 1]); assert(tdims[N + 2] == nodims[N + 2]); //complex float* src1C = xmalloc(S * sizeof(complex float)); complex float* src1C = dst; md_clear(N, dims1B, src1C, sizeof(complex float)); // must be done here #pragma omp parallel for collapse(3) for (int k = 0; k < nodims[N + 2]; k++) { for (int j = 0; j < nodims[N + 1]; j++) { for (int i = 0; i < nodims[N + 0]; i++) { complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst); complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst); long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k; long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k; md_copy2(N, tdims, str2, tmp, str1, ((const void*)src1B) + off1, sizeof(complex float)); conv(N, flags, CONV_VALID, CONV_SYMMETRIC, nodims, tmpX, tdims, tmp, ndims2, src2); md_zmul2(N, nodims, ostr, tmpX, ostr, tmpX, mstr, ((const void*)msk) + off2); convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); #pragma omp critical md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp); md_free(tmpX); md_free(tmp); }}} //md_resizec(N, dims1, dst, dims1B, src1C, sizeof(complex float)); //free(src1C); //free(src1B); } void overlapandsave2HB(int N, unsigned int flags, const long blk[N], const long dims1[N], complex float* dst, const long odims[N], const complex float* src1, const long dims2[N], const complex float* src2, const long mdims[N], const complex float* msk) { long dims1B[N]; long tdims[2 * N]; long nodims[2 * N]; long ndims2[2 * N]; long nmdims[2 * N]; int e = N; for (int i = 0; i < N; i++) { if (MD_IS_SET(flags, i)) { 
assert(1 == dims2[i] % 2); assert(0 == blk[i] % 2); assert(0 == dims1[i] % 2); assert(0 == odims[i] % blk[i]); assert(0 == dims1[i] % blk[i]); assert(dims1[i] == odims[i]); assert(dims2[i] <= blk[i]); assert(dims1[i] >= dims2[i]); assert((1 == mdims[i]) || (mdims[i] == dims1[i])); // blocked output nodims[e] = odims[i] / blk[i]; nodims[i] = blk[i]; // expanded temporary storage tdims[e] = dims1[i] / blk[i]; tdims[i] = blk[i] + dims2[i] - 1; // blocked input // ---|---,---,---|--- // + +++ + // + +++ + if (1 == mdims[i]) { nmdims[2 * i + 1] = 1; nmdims[2 * i + 1] = 1; } else { nmdims[2 * i + 1] = mdims[i] / blk[i]; nmdims[2 * i + 0] = blk[i]; } // resized input // minimal padding dims1B[i] = dims1[i] + (dims2[i] - 1); // kernel ndims2[e] = 1; ndims2[i] = dims2[i]; e++; } else { nodims[i] = odims[i]; tdims[i] = dims1[i]; nmdims[2 * i + 1] = 1; nmdims[2 * i + 0] = mdims[i]; dims1B[i] = dims1[i]; ndims2[i] = dims2[i]; } } int NE = e; // long S = md_calc_size(N, dims1B, 1); long str1[NE]; long str1B[N]; md_calc_strides(N, str1B, dims1B, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { str1[i] = str1B[i]; if (MD_IS_SET(flags, i)) str1[e++] = str1B[i] * blk[i]; } assert(NE == e); long str2[NE]; md_calc_strides(NE, str2, tdims, sizeof(complex float)); long ostr[NE]; long mstr[NE]; long mstrB[2 * N]; md_calc_strides(NE, ostr, nodims, sizeof(complex float)); md_calc_strides(2 * N, mstrB, nmdims, sizeof(complex float)); e = N; for (int i = 0; i < N; i++) { mstr[i] = mstrB[2 * i + 0]; if (MD_IS_SET(flags, i)) mstr[e++] = mstrB[2 * i + 1]; } assert(NE == e); // we can loop here assert(NE == N + 3); assert(1 == ndims2[N + 0]); assert(1 == ndims2[N + 1]); assert(1 == ndims2[N + 2]); assert(tdims[N + 0] == nodims[N + 0]); assert(tdims[N + 1] == nodims[N + 1]); assert(tdims[N + 2] == nodims[N + 2]); //complex float* src1C = xmalloc(S * sizeof(complex float)); complex float* src1C = dst; md_clear(N, dims1B, src1C, CFL_SIZE); // must be done here #pragma omp parallel for 
collapse(3) for (int k = 0; k < nodims[N + 2]; k++) { for (int j = 0; j < nodims[N + 1]; j++) { for (int i = 0; i < nodims[N + 0]; i++) { complex float* tmp = md_alloc_sameplace(N, tdims, CFL_SIZE, dst); complex float* tmpX = md_alloc_sameplace(N, nodims, CFL_SIZE, dst); long off1 = str1[N + 0] * i + str1[N + 1] * j + str1[N + 2] * k; long off2 = mstr[N + 0] * i + mstr[N + 1] * j + mstr[N + 2] * k; long off3 = ostr[N + 0] * i + ostr[N + 1] * j + ostr[N + 2] * k; md_zmul2(N, nodims, ostr, tmpX, ostr, ((const void*)src1) + off3, mstr, ((const void*)msk) + off2); convH(N, flags, CONV_VALID, CONV_SYMMETRIC, tdims, tmp, nodims, tmpX, ndims2, src2); #pragma omp critical md_zadd2(N, tdims, str1, ((void*)src1C) + off1, str1, ((void*)src1C) + off1, str2, tmp); md_free(tmpX); md_free(tmp); }}} }
binarytrees.c
#include <stdlib.h> #include <stdio.h> typedef off_t off64_t; #include <apr_pools.h> const size_t LINE_SIZE = 64; struct node { int i; struct node *left; struct node *right; }; int node_check(const struct node *n) { if (n->left) { int lc = node_check (n->left); int rc = node_check (n->right); return lc + n->i - rc; } return n->i; } struct node * node_get_avail (apr_pool_t *pool) { return apr_palloc (pool, sizeof(struct node)); } struct node * make (int i, int depth, apr_pool_t *pool) { struct node *curr = node_get_avail (pool); curr->i = i; if (depth > 0) { curr->left = make (2*i-1, depth - 1, pool); curr->right = make (2*i , depth - 1, pool); } else { curr->left = NULL; curr->right = NULL; } return curr; } int main(int argc, char *argv[]) { apr_pool_t *long_lived_pool; int min_depth = 4; int req_depth = (argc == 2 ? atoi(argv[1]) : 10); int max_depth = (req_depth > min_depth + 2 ? req_depth : min_depth + 2); int stretch_depth = max_depth+1; apr_initialize(); /* Alloc then dealloc stretchdepth tree */ { apr_pool_t *store; struct node *curr; apr_pool_create (&store, NULL); curr = make (0, stretch_depth, store); printf ("stretch tree of depth %i\t check: %i\n", stretch_depth, node_check (curr)); apr_pool_destroy (store); } apr_pool_create (&long_lived_pool, NULL); { struct node *long_lived_tree = make(0, max_depth, long_lived_pool); /* buffer to store output of each thread */ char *outputstr = (char*) malloc(LINE_SIZE * (max_depth +1) * sizeof(char)); int d; #pragma omp parallel for for (d = min_depth; d <= max_depth; d += 2) { int iterations = 1 << (max_depth - d + min_depth); apr_pool_t *store; int c = 0, i; apr_pool_create (&store, NULL); for (i = 1; i <= iterations; ++i) { struct node *a, *b; a = make ( i, d, store); b = make (-i, d, store); c += node_check (a) + node_check (b); apr_pool_clear (store); } apr_pool_destroy (store); /* each thread write to separate location */ sprintf(outputstr + LINE_SIZE * d, "%d\t trees of depth %d\t check: %d\n", (2 * 
iterations), d, c); } /* print all results */ for (d = min_depth; d <= max_depth; d += 2) printf("%s", outputstr + (d * LINE_SIZE) ); free(outputstr); printf ("long lived tree of depth %i\t check: %i\n", max_depth, node_check (long_lived_tree)); return 0; } } /* MAKE: /usr/bin/gcc -pipe -Wall -O3 -fomit-frame-pointer -march=native -fopenmp -D_FILE_OFFSET_BITS=64 -I/usr/include/apr-1.0 -lapr-1 -lgomp binarytrees.gcc-7.c -o binarytrees.gcc-7.gcc_run rm binarytrees.gcc-7.c */
psimci.c
/********************************************************************* * Program: psimci.c * Author: Mauricio Caceres Bravo <caceres@nber.org> * Created: Sun Feb 12 19:28:43 EST 2017 * Updated: Tue May 30 18:11:50 EDT 2017 * Purpose: Stata plugin to simulate a CI under H0: b = 0 for a * treatment effect given a regression specification. * Note: See stata.com/plugins for more on Stata plugins *********************************************************************/ /** * @file psimci.c * @author Mauricio Caceres bravo * @date 30 May 2017 * @brief Stata plugin to simulate a CI for a placebo treatment. * * See the documentation for simci.ado (e.g. help simci from Stata) * * @see http://www.stata.com/plugins */ #include <math.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <gsl/gsl_blas.h> #include <gsl/gsl_linalg.h> #include <gsl/gsl_matrix_double.h> #include <gsl/gsl_permutation.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_sort_vector.h> #include "psimci.h" #include "stplugin.h" #include "stutils.c" /** * @brief Main function call will execute as Stata plugin * * The function takes the first variable in @argc as the dependent * variable and the next k - 1 variables are covariates. @argc contains * the comma options passed by Stata. Currently just the proportion * randomzed and the number of simulations to run. See documentation for * simci.ado or sim_ci below for moew. * * @param argc List of variables to use * @param argv Comma options from Stata * @return Modified variables in Stata * @see Documentation for simci.ado * @warning This is meant to be run from simci.ado and not by itself */ STDLL stata_call(int argc, char *argv[]) { // Simple call to see if the plugin loaded char buffer[16]; strcpy (buffer, argv[0]); if ( strcmp(buffer, "check") == 0 ) return (0); // Initialize the variables to use ST_int i, j ; ST_double z ; ST_retcode rc ; // Get P and number of reps. 
Note the 0-based indexing! So the // functiona ssumes P and reps were the 1st and 3nd argument. double P = strtod (argv[0], NULL); int reps = strtod (argv[1], NULL); const size_t n = SF_in2(); const int k = SF_nvars(); // If too few variables (at least 2 for regressio), exit if (k < 2) { return (102) ; } // Initialize GSL elements where to store data gsl_matrix *X = gsl_matrix_alloc (n, k + 1); gsl_vector *y = gsl_vector_alloc (n); // Not sure if there is another way to read data vs the double loop. // Note: Careful with the 0-based indexing! for (i = SF_in1(); i <= SF_in2(); i++) { if (SF_ifobs(i)) { // Variables 2 through k are covariates for (j = 2; j <= k; j++) { // Note we leave the first column empty if ( (rc = SF_vdata(j, i, &z)) ) return(rc); gsl_matrix_set (X, i - 1, j - 1, z); } // Note we add the constant gsl_matrix_set (X, i - 1, k, 1.0); // Variable 1 is the dependent variable if ( (rc = SF_vdata(1, i, &z)) ) return(rc); gsl_vector_set (y, i - 1, z); } } // Now we call the simulation function and output the results into b, mu gsl_vector *b = gsl_vector_alloc (reps); gsl_vector *mu = gsl_vector_alloc (reps); sim_ci (X, y, P, reps, b, mu); // Not sure that there is a good way to output this into Stata So I // write to a file and read it back. char outb[64], outmu[64]; strcpy (outb, argv[2]); strcpy (outmu, argv[2]); strcat (outb, "b"); strcat (outmu, "mu"); FILE *fb = fopen (outb, "wb"); FILE *fmu = fopen (outmu, "wb"); gsl_vector_fprintf (fb, b, "%15.9f"); gsl_vector_fprintf (fmu, mu, "%15.9f"); fclose (fb); fclose (fmu); // Cleanup gsl_matrix_free (X); gsl_vector_free (y); gsl_vector_free (b); gsl_vector_free (mu); return (0); } /** * @brief Simulate a confidence interval given X, y * * The idea is to simulate a non-parametric CI based on placebo * assignments of a treatment variable. 
The program assigns * treatment at random, hence a null effect, to individuals or * clusters, optionally stratifying by any number of variables (or * the means thereof, in the case of clusters). Consider * * Y_ij = a + b T_j + g X_ij + e_ij * * There are C = J choose PJ ways to treat the clusters (or C = * P choose PN in the case of individuals). If we computed b_ols * for c = 1, ..., C we would know the exact distribution of our * estimator, conditional on the data being representative of the * study data. C is typically intractably large, hence we simulate * K draws with sum(T_jk = PJ) and run * * Y_ij = a + b_k T_jk + g X_ij + e_ij * * Let Fhat be the empirical cdf of b_k; a valid 1 - alpha CI for * b is given by * * CI(1 - a) = [Fhat^-1(a / 2), Fhat^-1(1 - a / 2)] * * The function takes the @X as the covariate matrix, which * must have k + 1 columns with the first column free, @y as * the dependent variable, and outputs the results to @b, @mu * * @param X Covariate matrix with first column blank * @param y Dependent variable * @param P Proportion in treatment * @param reps Number of reprtitions * @param b Vector of length @reps; will output coefficients here * @param mu Vector of length @reps; will output control means here * @return Modified @b, @mu with coefficients and means * @see Documentation for simci.ado */ int sim_ci (const gsl_matrix * X, const gsl_vector * y, const double P, const int reps, gsl_vector * b, gsl_vector * mu) { const size_t n = X->size1; const int k = X->size2; const int np = ceil(n * P); const int nc = n - np; double *sy = malloc (sizeof(double)); gsl_vector *ones = gsl_vector_alloc (n); gsl_vector_set_all (ones, 1.0); gsl_blas_ddot (ones, y, sy); // Set the random seed based on the time of day (seconds) srand (time(NULL)); gsl_rng *rng = gsl_rng_alloc (gsl_rng_default); gsl_rng_set (rng, rand()); // Get vector of 1s and 0s gsl_vector *T = gsl_vector_alloc (n); gsl_vector_set_zero (T); for (int i = 0; i < np; i++) { gsl_vector_set (T, 
i, 1.0); } // Initialize elements for parallel loop gsl_vector *Tp ; gsl_matrix *Xp ; int nloops ; double *sty ; // Get the number of threads available to OMP sf_printf("Parallelizing simulation; %d threads found:\n", get_omp_num_threads()); // Parallelize execution: Note We need a copy of Xp and Tp for each // thread since they will be modified at each iteration y does not // change, so it's shared. #pragma omp parallel private(Xp, Tp, nloops, sty) shared(y, b, sy) { nloops = 0; // Allocate to each therad their own copy Tp = gsl_vector_alloc (n); Xp = gsl_matrix_alloc (n, k); sty = malloc (sizeof(double)); gsl_vector_memcpy (Tp, T); gsl_matrix_memcpy (Xp, X); // Parallel for loop through simulation #pragma omp for for (int r = 0; r < reps; r++) { // 1. Shuffle treatment // 2. Set as first column of covariate matrix // 3. Get mean of y over controls // 4. Store coefficient/mean // 5. Repeat 1-4 // 6. ... // 7. Profit? gsl_ran_shuffle (rng, Tp->data, n, sizeof(size_t)); gsl_matrix_set_col (Xp, 0, Tp); gsl_vector_set (b, r, sim_ols(Xp, y)); gsl_blas_ddot (Tp, y, sty); gsl_vector_set (mu, r, (*sy - *sty) / nc); ++nloops; } // I want to print a pretty message saying how many iterations // each thread completed. Since threads finish on their own, // messages would be print at disparate times. However, one can // specify "critical" code which is executed only after all // threads are done running. 
#pragma omp critical { sf_printf("\tThread %d performed %d simulations.\n", omp_get_thread_num(), nloops); } // Cleanup gsl_matrix_free (Xp); gsl_vector_free (Tp); } // Cleanup gsl_vector_free (T); gsl_rng_free (rng); return (0); } /** * @brief Number of threads available to OMP * * Short wrapper to get number of threads available to OMP * * @return Number of threads available to OMP */ int get_omp_num_threads() { int thread_id; int nthreads = 0; #pragma omp parallel private(thread_id) shared(nthreads) { thread_id = omp_get_thread_num(); #pragma omp critical { nthreads = thread_id > nthreads? thread_id: nthreads; } } nthreads++; return (nthreads); } /** * @brief Wrapper to run a linear regression * * All I want is the first coefficient of a linear regression. For * * Y = X beta * * I want (X' X)^-1 X' Y. GSL has solvers for a system of the form * * Ax = b * * Where A is a symmetric matrix. Take A = X' X and b = X' y, then * we can use any number of routines to find x (especially since A * is now symmetric). * * @param X A n by k gsl matrix containing covariates. * @param y A n by 1 gsl vector containing the dependent variable * @return The first coefficient of a linear regression. 
* @warning This is meant to be run within the main loop of stata_call */ double sim_ols(const gsl_matrix * X, const gsl_vector * y) { // Allocate memory to express the system as Ax = b gsl_matrix *A = gsl_matrix_alloc (X->size2, X->size2); gsl_vector *b = gsl_vector_alloc (X->size2); gsl_vector *x = gsl_vector_alloc (X->size2); // Set A = X' X and b = X' y gsl_blas_dgemm (CblasTrans, CblasNoTrans, 1.0, X, X, 0.0, A); gsl_blas_dgemv (CblasTrans, 1.0, X, y, 0.0, b); // Cholesky decomposition gsl_linalg_cholesky_decomp1 (A); gsl_linalg_cholesky_solve (A, b, x); // You don't have to use Cholesky; a number of methods are available // // int s; // gsl_permutation * P = gsl_permutation_alloc (X->size2); // gsl_vector * tau = gsl_vector_alloc (X->size2); // // Householder // gsl_linalg_HH_solve (A, b, x); // // LU decomposition // gsl_linalg_LU_decomp (A, P, &s); // gsl_linalg_LU_solve (A, P, b, x); // gsl_permutation_free (P); // // QR decomposition // gsl_linalg_QR_decomp (A, tau); // gsl_linalg_QR_solve (A, tau, b, x); // gsl_vector_free (tau); // Free up space gsl_matrix_free (A); gsl_vector_free (b); return (gsl_vector_get(x, 0)); } /** * @brief Get pctile of a function * * Basic wrapper to get the @pctile percentile of a function. * * @param x n by 1 gsl vector whose percentile we want. * @param pctile Percentile * @return @pctile percentile of x */ double sim_pctile(gsl_vector * x, double pctile) { gsl_sort_vector (x); int n = x->size; int i = floor(n * pctile); double qq = gsl_vector_get (x, i); if (i / n == pctile) { qq = (qq + gsl_vector_get (x, i + 1)) / 2; } return (qq); }
GB_binop__bset_uint32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): every function body below is a template expansion via
// #include; behavior is defined by the GB_* macros in this header section
// plus the template files.  Fix bugs in Generator/, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__bset_uint32)
// A.*B function (eWiseMult):     GB (_AemultB_08__bset_uint32)
// A.*B function (eWiseMult):     GB (_AemultB_02__bset_uint32)
// A.*B function (eWiseMult):     GB (_AemultB_04__bset_uint32)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__bset_uint32)
// A*D function (colscale):       GB ((none))
// D*A function (rowscale):       GB ((none))
// C+=B function (dense accum):   GB (_Cdense_accumB__bset_uint32)
// C+=b function (dense accum):   GB (_Cdense_accumb__bset_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__bset_uint32)
// C=scalar+B    GB (_bind1st__bset_uint32)
// C=scalar+B'   GB (_bind1st_tran__bset_uint32)
// C=A+scalar    GB (_bind2nd__bset_uint32)
// C=A'+scalar   GB (_bind2nd_tran__bset_uint32)

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 0

// BinaryOp: cij = GB_BITSET (aij, bij, uint32_t, 32)

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITSET (x, y, uint32_t, 32) ;

// true if the binop must be flipped
// (GB_BITSET is not commutative; see the flipxy handling in
// GB (_AemultB_02__bset_uint32) below)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BSET || GxB_NO_UINT32 || GxB_NO_BSET_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bset_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bset_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only defined (and only read by the template) for
    // eWiseUnion; plain eWiseAdd leaves them uninitialized on purpose
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bset_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bset_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bset_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITSET (x, bij, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bset_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITSET (aij, y, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_BITSET (x, aij, uint32_t, 32) ;    \
}

GrB_Info GB (_bind1st_tran__bset_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_BITSET (aij, y, uint32_t, 32) ;    \
}

GrB_Info GB (_bind2nd_tran__bset_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
lis_matrix_dns.c
/* Copyright (C) 2002-2012 The SSI Project. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE SCALABLE SOFTWARE INFRASTRUCTURE PROJECT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* DNS (dense) storage operations.  The value array holds np columns of
 * length n in column-major order, so element (row i, col j) lives at
 * value[j*n + i] (grounded by get_diagonal and crs2dns below). */

#ifdef HAVE_CONFIG_H
	#include "lis_config.h"
#else
#ifdef HAVE_CONFIG_WIN32_H
	#include "lis_config_win32.h"
#endif
#endif

#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_MALLOC_H
        #include <malloc.h>
#endif
#include <string.h>
#include <stdarg.h>
#include <math.h>
#ifdef _OPENMP
	#include <omp.h>
#endif
#ifdef USE_MPI
	#include <mpi.h>
#endif
#include "lislib.h"

/************************************************
 * function                    | SOM |
 *-----------------------------+-----+
 * lis_matrix_set              | o   |
 * lis_matrix_setDLU           | o   |
 * lis_matrix_malloc           | o   |
 * lis_matrix_elements_copy    | o   |
 * lis_matrix_transpose        | o   |
 * lis_matrix_split            | o   |
 * lis_matrix_merge            | o   |
 *-----------------------------+-----+-----+
 * function                    |merge|split|
 *-----------------------------+-----+-----|
 * lis_matrix_convert          | o   |     |
 * lis_matrix_copy             | o   | o   |
 * lis_matrix_get_diagonal     | o   | o   |
 * lis_matrix_scaling          | o   | o   |
 * lis_matrix_scaling_symm     | o   | o   |
 * lis_matrix_normf            | o   | o   |
 * lis_matrix_sort             | o   | o   |
 * lis_matrix_solve            | xxx | o   |
 * lis_matrix_solvet           | xxx | o   |
 ************************************************/

#undef __FUNC__
#define __FUNC__ "lis_matrix_set_dns"
/* Attach a caller-supplied dense value array to A without copying.
 * Marks the matrix as DNS-but-not-yet-assembled (negated status;
 * presumably flipped positive by lis_matrix_assemble — TODO confirm). */
LIS_INT lis_matrix_set_dns(LIS_SCALAR *value, LIS_MATRIX A)
{
	LIS_INT err;

	LIS_DEBUG_FUNC_IN;

#if 0
	err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
	if( err ) return err;
#else
	/* already-assembled matrices are left untouched */
	if(lis_matrix_is_assembled(A))  return LIS_SUCCESS;
	else
	{
		err = lis_matrix_check(A,LIS_MATRIX_CHECK_SET);
		if( err ) return err;
	}
#endif

	A->value       = value;
	A->is_copy     = LIS_FALSE;   /* array is owned by the caller */
	A->status      = -LIS_MATRIX_DNS;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_malloc_dns"
/* Allocate the n*np dense value array for a DNS matrix. */
LIS_INT lis_matrix_malloc_dns(LIS_INT n, LIS_INT np, LIS_SCALAR **value)
{
	LIS_DEBUG_FUNC_IN;

	*value   = NULL;

	*value = (LIS_SCALAR *)lis_malloc( n*np*sizeof(LIS_SCALAR),"lis_matrix_malloc_dns::value" );
	if( *value==NULL )
	{
		LIS_SETERR_MEM(n*np*sizeof(LIS_SCALAR));
		return LIS_OUT_OF_MEMORY;
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_elements_copy_dns"
/* Copy the n*np value array into o_value.  With OpenMP the row range
 * [0,n) is partitioned across threads via LIS_GET_ISIE; each thread
 * copies its row block of every column, so writes never overlap. */
LIS_INT lis_matrix_elements_copy_dns(LIS_INT n, LIS_INT np, LIS_SCALAR *value, LIS_SCALAR *o_value)
{
	LIS_INT	i,j,is,ie;
	LIS_INT	nprocs,my_rank;

	LIS_DEBUG_FUNC_IN;

	#ifdef _OPENMP
		nprocs = omp_get_max_threads();
	#else
		nprocs = 1;
	#endif

	#ifdef _OPENMP
	#pragma omp parallel private(i,j,is,ie,my_rank)
	#endif
	{
		#ifdef _OPENMP
			my_rank = omp_get_thread_num();
		#else
			my_rank = 0;
		#endif
		LIS_GET_ISIE(my_rank,nprocs,n,is,ie);

		for(j=0;j<np;j++)
		{
			for(i=is;i<ie;i++)
			{
				o_value[j*n + i]   = value[j*n + i];
			}
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_copy_dns"
/* Deep-copy Ain into Aout: duplicate the value array, and if Ain is
 * split also rebuild the diagonal block D from the dense values. */
LIS_INT lis_matrix_copy_dns(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT err;
	LIS_INT i,n,np;
	LIS_SCALAR *value;
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;
	np      = Ain->np;

	value   = NULL;

	err = lis_matrix_malloc_dns(n,np,&value);
	if( err )
	{
		return err;
	}

	lis_matrix_elements_copy_dns(n,np,Ain->value,value);

	if( Ain->is_splited )
	{
		err = lis_matrix_diag_duplicateM(Ain,&D);
		if( err )
		{
			lis_free(value);
			return err;
		}
		/* diagonal entry (i,i) of the column-major dense array */
		#ifdef _OPENMP
		#pragma omp parallel for private(i)
		#endif
		for(i=0;i<n;i++)
		{
			D->value[i] = Ain->value[i*n + i];
		}
		Aout->D = D;
	}

	err = lis_matrix_set_dns(value,Aout);
	if( err )
	{
		lis_free(value);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_get_diagonal_dns"
/* d[i] = A(i,i). */
LIS_INT lis_matrix_get_diagonal_dns(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i;
	LIS_INT n;

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	#ifdef _OPENMP
	#pragma omp parallel for private(i)
	#endif
	for(i=0; i<n; i++)
	{
		d[i] = A->value[i*n + i];
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_dns"
/* Row scaling: A(i,j) *= d[i] for every column j. */
LIS_INT lis_matrix_scaling_dns(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT n,np;

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	np   = A->np;
	for(j=0;j<np;j++)
	{
		for(i=0;i<n;i++)
		{
			A->value[j*n + i] *= d[i];
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_scaling_symm_dns"
/* Symmetric scaling: A(i,j) *= d[i]*d[j].
 * NOTE(review): j runs up to np, so d must have at least np entries —
 * TODO confirm for the np > n (overlap) case. */
LIS_INT lis_matrix_scaling_symm_dns(LIS_MATRIX A, LIS_SCALAR d[])
{
	LIS_INT i,j;
	LIS_INT n,np;

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	np   = A->np;
	for(j=0;j<np;j++)
	{
		for(i=0;i<n;i++)
		{
			A->value[j*n + i] *= d[i]*d[j];
		}
	}
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_normf_dns"
/* Frobenius norm.
 * NOTE(review): both branches traverse index[]/value[] in CRS style
 * (A->index, A->L->index, ...), which a DNS matrix does not populate —
 * this looks copied from the CRS variant and should sum all n*np dense
 * entries instead.  Verify against lis_matrix_normf for other formats. */
LIS_INT lis_matrix_normf_dns(LIS_MATRIX A, LIS_SCALAR *nrm)
{
	LIS_INT i,j;
	LIS_INT n;
	LIS_SCALAR sum;

	LIS_DEBUG_FUNC_IN;

	n    = A->n;
	sum  = (LIS_SCALAR)0;
	if( A->is_splited )
	{
		#ifdef _OPENMP
		#pragma omp parallel for reduction(+:sum) private(i,j)
		#endif
		for(i=0; i<n; i++)
		{
			sum += A->D->value[i]*A->D->value[i];
			for(j=A->L->index[i];j<A->L->index[i+1];j++)
			{
				sum += A->L->value[j]*A->L->value[j];
			}
			for(j=A->U->index[i];j<A->U->index[i+1];j++)
			{
				sum += A->U->value[j]*A->U->value[j];
			}
		}
	}
	else
	{
		#ifdef _OPENMP
		#pragma omp parallel for reduction(+:sum) private(i,j)
		#endif
		for(i=0; i<n; i++)
		{
			sum += A->value[i]*A->value[i];
			for(j=A->index[i];j<A->index[i+1];j++)
			{
				sum += A->value[j]*A->value[j];
			}
		}
	}
	*nrm = sqrt(sum);
	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_transpose_dns"
/* Stub: only tags *Aout as DNS; the actual conversion is disabled. */
LIS_INT lis_matrix_transpose_dns(LIS_MATRIX Ain, LIS_MATRIX *Aout)
{
	LIS_DEBUG_FUNC_IN;

/*	err = lis_matrix_convert_dns2ccs(Ain,Aout);*/
	(*Aout)->matrix_type = LIS_MATRIX_DNS;
	(*Aout)->status      = LIS_MATRIX_DNS;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_split_dns"
/* Extract the diagonal into A->D and mark the matrix split; the dense
 * value array itself is kept (is_save = TRUE). */
LIS_INT lis_matrix_split_dns(LIS_MATRIX A)
{
	LIS_INT i,n;
	LIS_INT err;
	LIS_MATRIX_DIAG	D;

	LIS_DEBUG_FUNC_IN;

	n        = A->n;

	err = lis_matrix_diag_duplicateM(A,&D);
	if( err )
	{
		return err;
	}
	for(i=0;i<n;i++)
	{
		D->value[i] = A->value[i*n + i];
	}
	A->D          = D;
	A->is_splited = LIS_TRUE;
	A->is_save    = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_merge_dns"
/* Undo split: the dense array was never modified, so just clear the flag. */
LIS_INT lis_matrix_merge_dns(LIS_MATRIX A)
{
	LIS_DEBUG_FUNC_IN;

	A->is_splited = LIS_FALSE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_sort_dns"
/* Dense storage has no entry ordering to fix; just mark as sorted. */
LIS_INT lis_matrix_sort_dns(LIS_MATRIX A)
{
	LIS_DEBUG_FUNC_IN;

	A->is_sorted = LIS_TRUE;

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solve_dns"
/* Triangular / SSOR solves used by preconditioners.
 * Assumes A->WD->value[i] holds the reciprocal of the diagonal
 * (multiplied, never divided — TODO confirm where WD is built). */
LIS_INT lis_matrix_solve_dns(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n,np;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n  = A->n;
	np = A->np;
	b  = B->value;
	x  = X->value;

	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		/* forward substitution over the strict lower triangle */
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=0;j<i;j++)
			{
				t -= A->value[j*n + i] * x[j];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_UPPER:
		/* backward substitution over the strict upper triangle */
		for(i=n-1;i>=0;i--)
		{
			t = b[i];
			for(j=i+1;j<np;j++)
			{
				t -= A->value[j*n + i] * x[j];
			}
			x[i] = t * A->WD->value[i];
		}
		break;
	case LIS_MATRIX_SSOR:
		/* forward then backward sweep */
		for(i=0;i<n;i++)
		{
			t = b[i];
			for(j=0;j<i;j++)
			{
				t -= A->value[j*n + i] * x[j];
			}
			x[i] = t * A->WD->value[i];
		}
		for(i=n-1;i>=0;i--)
		{
			t = 0.0;
			for(j=i+1;j<n;j++)
			{
				t += A->value[j*n + i] * x[j];
			}
			x[i] -= t * A->WD->value[i];
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_solvet_dns"
/* Transposed variants of the solves above: updates are scattered to
 * x[j] column-wise instead of gathered row-wise. */
LIS_INT lis_matrix_solvet_dns(LIS_MATRIX A, LIS_VECTOR B, LIS_VECTOR X, LIS_INT flag)
{
	LIS_INT i,j,n,np;
	LIS_SCALAR t;
	LIS_SCALAR *b,*x;

	LIS_DEBUG_FUNC_IN;

	n  = A->n;
	np = A->np;
	b  = B->value;
	x  = X->value;

	lis_vector_copy(B,X);
	switch(flag)
	{
	case LIS_MATRIX_LOWER:
		for(i=0;i<n;i++)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=i+1;j<np;j++)
			{
				x[j] -= A->value[j*n + i] * x[i];
			}
		}
		break;
	case LIS_MATRIX_UPPER:
		for(i=n-1;i>=0;i--)
		{
			x[i] = x[i] * A->WD->value[i];
			for(j=0;j<i;j++)
			{
				x[j] -= A->value[j*n + i] * x[i];
			}
		}
		break;
	case LIS_MATRIX_SSOR:
		for(i=0;i<n;i++)
		{
			t = x[i] * A->WD->value[i];
			for(j=i+1;j<np;j++)
			{
				x[j] -= A->value[j*n + i] * t;
			}
		}
		for(i=n-1;i>=0;i--)
		{
			t = x[i] * A->WD->value[i];
			x[i] = t;
			for(j=0;j<i;j++)
			{
				x[j] -= A->value[j*n + i] * t;
			}
		}
		break;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_crs2dns"
/* CRS -> DNS: zero-fill the dense array, then scatter each CRS entry
 * to value[row + n*col].  Rows are partitioned across OpenMP threads. */
LIS_INT lis_matrix_convert_crs2dns(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j;
	LIS_INT err;
	LIS_INT n,np,nprocs,my_rank;
	LIS_INT is,ie;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;
	np      = Ain->np;

	#ifdef _OPENMP
		nprocs  = omp_get_max_threads();
	#else
		nprocs  = 1;
	#endif

	value   = NULL;

	err = lis_matrix_malloc_dns(n,np,&value);
	if( err )
	{
		return err;
	}

	/* convert dns */
	#ifdef _OPENMP
	#pragma omp parallel private(i,j,is,ie,my_rank)
	#endif
	{
		#ifdef _OPENMP
			my_rank = omp_get_thread_num();
		#else
			my_rank = 0;
		#endif
		LIS_GET_ISIE(my_rank,nprocs,n,is,ie);

		for(j=0;j<np;j++)
		{
			for(i=is;i<ie;i++)
			{
				value[j*n+i] = (LIS_SCALAR)0.0;
			}
		}
		for(i=is;i<ie;i++)
		{
			for(j=Ain->ptr[i];j<Ain->ptr[i+1];j++)
			{
				value[i + n*Ain->index[j]] = Ain->value[j];
			}
		}
	}

	err = lis_matrix_set_dns(value,Aout);
	if( err )
	{
		lis_free(value);
		return err;
	}
	err = lis_matrix_assemble(Aout);
	if( err )
	{
		lis_matrix_storage_destroy(Aout);
		return err;
	}

	LIS_DEBUG_FUNC_OUT;
	return LIS_SUCCESS;
}

#undef __FUNC__
#define __FUNC__ "lis_matrix_convert_dns2crs"
/* DNS -> CRS: count nonzeros per row, prefix-sum into ptr, then fill
 * index/value.  (Function continues beyond this chunk.) */
LIS_INT lis_matrix_convert_dns2crs(LIS_MATRIX Ain, LIS_MATRIX Aout)
{
	LIS_INT i,j,k;
	LIS_INT err;
	LIS_INT gn,n,np,nnz,is,ie;
	LIS_INT *ptr,*index;
	LIS_SCALAR *value;

	LIS_DEBUG_FUNC_IN;

	n       = Ain->n;
	np      = Ain->np;
	gn      = Ain->gn;
	is      = Ain->is;
	ie      = Ain->ie;

	ptr     = NULL;
	index   = NULL;
	value   = NULL;

	ptr = (LIS_INT *)lis_malloc( (n+1)*sizeof(LIS_INT),"lis_matrix_convert_dns2crs::ptr" );
	if( ptr==NULL )
	{
		LIS_SETERR_MEM((n+1)*sizeof(LIS_INT));
		return LIS_OUT_OF_MEMORY;
	}

	/* per-row nonzero counts (explicit zeros are dropped) */
	#ifdef _OPENMP
	#pragma omp parallel for private(i,j)
	#endif
	for(i=0;i<n;i++)
	{
		ptr[i+1] = 0;
		for(j=0;j<np;j++)
		{
			if( Ain->value[j*n+i]!=(LIS_SCALAR)0.0 )
			{
				ptr[i+1]++;
			}
		}
	}
	ptr[0] = 0;
	for(i=0;i<n;i++)
	{
		ptr[i+1] += ptr[i];
	}
	nnz = ptr[n];

	index = (LIS_INT *)lis_malloc( nnz*sizeof(LIS_INT),"lis_matrix_convert_dns2crs::index" );
	if( index==NULL )
	{
		lis_free2(3,ptr,index,value);
		LIS_SETERR_MEM(nnz*sizeof(LIS_INT));
		return
LIS_OUT_OF_MEMORY; } value = (LIS_SCALAR *)lis_malloc( nnz*sizeof(LIS_SCALAR),"lis_matrix_convert_dns2crs::value" ); if( value==NULL ) { lis_free2(3,ptr,index,value); LIS_SETERR_MEM(nnz*sizeof(LIS_INT)); return LIS_OUT_OF_MEMORY; } /* convert crs */ #ifdef _OPENMP #pragma omp parallel for private(i,j,k) #endif for(i=0;i<n;i++) { k = ptr[i]; for(j=0;j<np;j++) { if( Ain->value[j*n + i]!=(LIS_SCALAR)0.0 ) { value[k] = Ain->value[j*n + i]; index[k] = j; k++; } } } err = lis_matrix_set_crs(nnz,ptr,index,value,Aout); if( err ) { lis_free2(3,ptr,index,value); return err; } err = lis_matrix_assemble(Aout); if( err ) { lis_matrix_storage_destroy(Aout); return err; } LIS_DEBUG_FUNC_OUT; return LIS_SUCCESS; }
pi.c
/*
 * pi.c approximates the value of Pi through numerical integration of
 * the function f(x) = 4/(1+x*x) over [0,1] using the midpoint rule.
 * The interval [0,1] is divided into n subintervals each of size h = 1/n.
 */
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <omp.h>

double CalcPi(int n);

int main(int argc, char **argv)
{
    int n = 150000000;          /* default number of subintervals */
    const double fPi25DT = 3.141592653589793238462643;  /* 25-digit reference value */
    double fPi;
    double fTimeStart, fTimeEnd;

#ifdef READ_INPUT
    printf("Enter the number of intervals: ");
    if (scanf("%d", &n) != 1) { /* reject non-numeric input instead of using garbage */
        printf("\ninvalid input, expected an integer\n");
        return 1;
    }
#endif

    /* n is a signed 32-bit int, so it can never exceed INT_MAX: the original
     * test `n > 2147483647` was always false. Only non-positive values need
     * rejecting. */
    if (n <= 0) {
        printf("\ngiven value has to be a positive integer\n");
        return 1;
    }

    /* omp_get_wtime gets the wall time */
    fTimeStart = omp_get_wtime();
    fPi = CalcPi(n);            /* the calculation is done here */
    fTimeEnd = omp_get_wtime();

    printf("\npi is approximately = %.20f \nError = %.20f\n",
           fPi, fabs(fPi - fPi25DT));
    printf(" wall clock time = %.20f\n", fTimeEnd - fTimeStart);
    return 0;
}

/* Integrand: f(a) = 4 / (1 + a^2); its definite integral over [0,1] is Pi. */
double f(double a)
{
    return (4.0 / (1.0 + a * a));
}

/*
 * Midpoint-rule integration of f over [0,1] with n subintervals of width
 * fH = 1/n. The partial sums are combined with an OpenMP reduction; fX is
 * private to each iteration.
 */
double CalcPi(int n)
{
    const double fH = 1.0 / (double)n;
    double fSum = 0.0;
    double fX;
    int i;

#pragma omp parallel for reduction(+:fSum) private(fX)
    for (i = 0; i < n; i += 1) {
        fX = fH * ((double)i + 0.5);
        fSum += f(fX);
    }
    return fH * fSum;
}
GB_binop__rdiv_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64) // A*D function (colscale): GB (_AxD__rdiv_fc64) // D*A function (rowscale): GB (_DxB__rdiv_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64) // C=scalar+B GB (_bind1st__rdiv_fc64) // C=scalar+B' GB (_bind1st_tran__rdiv_fc64) // C=A+scalar GB (_bind2nd__rdiv_fc64) // C=A'+scalar GB (_bind2nd_tran__rdiv_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_div (bij, aij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // 
true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_div (y, x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool 
D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool 
Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_div (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_div (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_div (aij, x) ; \ } GrB_Info GB (_bind1st_tran__rdiv_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_div (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
n_body_sim_omp.c
/*
 * Parallelized (OpenMP) n-body solver for planets/stars in Euclid space.
 * Reads the initial state of BODY_COUNT bodies from a CSV, integrates
 * SIM_STEPS frames with explicit Euler stepping, and writes the final
 * state back to a CSV.
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <omp.h>

#define BODY_COUNT 10000
#define SIM_STEPS 50          /* Number of frames to simulate */
#define G (6.674e-11f)        /* Newton's gravitational constant; parenthesized
                               * constant instead of the unparenthesized
                               * `6.674f * pow(10, -11)` macro hazard */
#define DELTA_T 0.01f         /* Time gap between two simulation frames (in simulation) */

typedef struct {
    float m, x, y, z, vx, vy, vz;
} Body;

/* Print the elapsed wall-clock time and terminate the process. */
int bye(double *tcalc)
{
    printf("Simulation took %lf seconds.\n", *tcalc);
    exit(0);
}

int main(const int argc, const char **argv)
{
    double tstart = 0.0, tstop = 0.0, tcalc = 0.0; /* For timing */
    char file_name[100], body_count[10];
    FILE *fp;

    Body *bodies = malloc(BODY_COUNT * sizeof *bodies);
    if (bodies == NULL) {
        printf("Sorry, could not allocate memory for %d bodies.\n", BODY_COUNT);
        return 0;
    }

    sprintf(body_count, "%d", BODY_COUNT);
    strcpy(file_name, "/home/u47422/it17142038/assignment/dataset_");
    strcat(file_name, body_count);
    strcat(file_name, ".csv");

    fp = fopen(file_name, "r"); /* read mode */
    if (fp == NULL) {
        printf("Sorry, an error occured while reading input file.\n");
        free(bodies);
        return 0;
    }

    fscanf(fp, "%*[^\n]\n"); /* Skip headings in input csv file */
    /* Assume csv with nx7 values (mass, coord_x, coord_y, coord_z,
     * velocity_x, velocity_y, velocity_z); bail out on short/garbled rows
     * instead of simulating uninitialized bodies. */
    for (int i = 0; i < BODY_COUNT; i++) {
        if (fscanf(fp, "%f,%f,%f,%f,%f,%f,%f",
                   &bodies[i].m, &bodies[i].x, &bodies[i].y, &bodies[i].z,
                   &bodies[i].vx, &bodies[i].vy, &bodies[i].vz) != 7) {
            printf("Sorry, input file is malformed at body %d.\n", i);
            fclose(fp);
            free(bodies);
            return 0;
        }
    }
    fclose(fp); /* the original leaked this read handle */

    tstart = omp_get_wtime();
    for (int step = 0; step < SIM_STEPS; step++) {
        #pragma omp parallel for
        for (int i = 0; i < BODY_COUNT; i++) {
            /* Accumulate acceleration directly: a_i = sum_j G*m_j/r^3 * r_vec.
             * The original accumulated force (including bodies[i].m) and then
             * added DELTA_T * F to the velocity without dividing by m_i,
             * violating a = F/m; folding the division in fixes that and saves
             * a multiply. */
            float ax = 0.0f;
            float ay = 0.0f;
            float az = 0.0f;
            for (int j = 0; j < BODY_COUNT; j++) {
                if (i == j)
                    continue;
                const float dx = bodies[j].x - bodies[i].x;
                const float dy = bodies[j].y - bodies[i].y;
                const float dz = bodies[j].z - bodies[i].z;
                const float dist = sqrt(dx * dx + dy * dy + dz * dz);
                const float dist_cubed = dist * dist * dist;
                ax += G * bodies[j].m / dist_cubed * dx;
                ay += G * bodies[j].m / dist_cubed * dy;
                az += G * bodies[j].m / dist_cubed * dz;
            }
            /* Each iteration i writes only bodies[i] and reads positions that
             * are not modified inside this parallel loop, so no critical
             * section is needed (the original one only serialized threads). */
            bodies[i].vx += DELTA_T * ax;
            bodies[i].vy += DELTA_T * ay;
            bodies[i].vz += DELTA_T * az;
        }
        /* Update coordinates (serial loop since not much work) */
        for (int i = 0; i < BODY_COUNT; i++) {
            bodies[i].x += bodies[i].vx * DELTA_T;
            bodies[i].y += bodies[i].vy * DELTA_T;
            bodies[i].z += bodies[i].vz * DELTA_T;
        }
    }
    tstop = omp_get_wtime();

    /* For debugging purposes */
    printf("body0:\n m:%.7f\n x:%.7f\n y:%.7f\n z:%.7f\n vx:%.7f\n vy:%.7f\n vz:%.7f\n",
           bodies[0].m, bodies[0].x, bodies[0].y, bodies[0].z,
           bodies[0].vx, bodies[0].vy, bodies[0].vz);

    strcpy(file_name, "output_");
    strcat(file_name, "omp_");
    strcat(file_name, body_count);
    strcat(file_name, ".csv");
    printf("Written all to %s\n", file_name);
    fp = fopen(file_name, "w"); /* write mode */
    if (fp == NULL) {
        printf("Sorry, an error occured while opening output file for writing.\n");
        free(bodies);
        return 0;
    }

    /* Write headers for csv */
    fprintf(fp, "mass,coord_x,coord_y,coord_z,velocity_x,velocity_y,velocity_z\n");
    /* Write csv with nx7 values (mass, coord_x, coord_y, coord_z, velocity_x, velocity_y, velocity_z) */
    for (int i = 0; i < BODY_COUNT; i++)
        fprintf(fp, "%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f\n",
                bodies[i].m, bodies[i].x, bodies[i].y, bodies[i].z,
                bodies[i].vx, bodies[i].vy, bodies[i].vz);
    fclose(fp);
    free(bodies);

    printf("Simulated %d frames for %d bodies\n", SIM_STEPS, BODY_COUNT);
    tcalc = tstop - tstart;
    bye(&tcalc); /* exits the process */
    return 0;
}
naive_math_impl.h
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <algorithm> #include <cmath> template <typename type> static void basic_trans_mat_to_c4(const type* input, type* output, const int ldin, const int M, const int K, bool pack_k) { const int m_round = (M + 3) / 4 * 4; int k_round = (K + 3) / 4 * 4; if (!pack_k) { k_round = K; } const int m_loop = m_round / 4; type* zero_buf = new type[K]; memset(zero_buf, 0, K * sizeof(type)); for (int i = 0; i < m_loop; ++i) { const type* in0 = input + i * 4 * ldin; const type* in1 = in0 + ldin; const type* in2 = in1 + ldin; const type* in3 = in2 + ldin; if (4 * (i + 1) - M > 0) { switch (4 * (i + 1) - M) { case 3: in1 = zero_buf; case 2: in2 = zero_buf; case 1: in3 = zero_buf; default: break; } } for (int j = 0; j < K; ++j) { *output++ = *in0++; *output++ = *in1++; *output++ = *in2++; *output++ = *in3++; } for (int j = K; j < k_round; ++j) { *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); } } delete[] zero_buf; } template <typename type> static void basic_trans_mat_to_c8(const type* input, type* output, const int ldin, const int M, const int K, bool pack_k) { const int m_round = (M + 7) / 8 * 8; int k_round = (K + 7) / 8 * 8; if (!pack_k) { k_round = K; } const int m_loop = m_round / 8; type zero_buf[K]; memset(zero_buf, 0, K * sizeof(type)); for (int i = 0; i < 
m_loop; ++i) { const type* in0 = input + i * 8 * ldin; const type* in1 = in0 + ldin; const type* in2 = in1 + ldin; const type* in3 = in2 + ldin; const type* in4 = in3 + ldin; const type* in5 = in4 + ldin; const type* in6 = in5 + ldin; const type* in7 = in6 + ldin; if (8 * (i + 1) - M > 0) { switch (8 * (i + 1) - M) { case 7: in1 = zero_buf; case 6: in2 = zero_buf; case 5: in3 = zero_buf; case 4: in4 = zero_buf; case 3: in5 = zero_buf; case 2: in6 = zero_buf; case 1: in7 = zero_buf; default: break; } } for (int j = 0; j < K; ++j) { *output++ = *in0++; *output++ = *in1++; *output++ = *in2++; *output++ = *in3++; *output++ = *in4++; *output++ = *in5++; *output++ = *in6++; *output++ = *in7++; } for (int j = K; j < k_round; ++j) { *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); *output++ = static_cast<type>(0); } } } template <typename type, typename type2> static void basic_gemm_c4(bool trans_a, bool trans_b, int m, int n, int k, type2 alpha, const type* a, int lda, const type* b, int ldb, type2 beta, type2* c, int ldc, const type2* bias, bool flag_bias = false, bool flag_relu = false) { type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2))); memset(tmp_c, 0, m * ldc * sizeof(type2)); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < m; ++i) { auto bias_data = static_cast<type2>(0); if (flag_bias) { bias_data = bias[i]; } for (int j = 0; j < n; ++j) { auto sum = static_cast<type2>(0); for (int l = 0; l < k; ++l) { type av; type bv; if (trans_a) { av = a[l * lda + i]; } else { av = a[i * lda + l]; } if (trans_b) { bv = b[j * ldb + l]; } else { bv = b[l * ldb + j]; } sum += av * bv; } type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data; if (flag_relu) { tmp_c[i * ldc + j] = tmp > (type2)0 ? 
tmp : (type2)0; } else { tmp_c[i * ldc + j] = tmp; } } } //! trans c to c4 basic_trans_mat_to_c4(tmp_c, c, ldc, m, n, false); free(tmp_c); } template <typename type, typename type2> static void basic_gemm_c8(bool trans_a, bool trans_b, int m, int n, int k, type2 alpha, const type* a, int lda, const type* b, int ldb, type2 beta, type2* c, int ldc, const type2* bias, bool flag_bias = false, bool flag_relu = false) { type2* tmp_c = reinterpret_cast<type2*>(malloc(m * ldc * sizeof(type2))); memset(tmp_c, 0, m * ldc * sizeof(type2)); #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < m; ++i) { auto bias_data = static_cast<type2>(0); if (flag_bias) { bias_data = bias[i]; } for (int j = 0; j < n; ++j) { auto sum = static_cast<type2>(0); for (int l = 0; l < k; ++l) { type av; type bv; if (trans_a) { av = a[l * lda + i]; } else { av = a[i * lda + l]; } if (trans_b) { bv = b[j * ldb + l]; } else { bv = b[l * ldb + j]; } sum += av * bv; } type2 tmp = alpha * sum + beta * tmp_c[i * ldc + j] + bias_data; if (flag_relu) { tmp_c[i * ldc + j] = tmp > (type2)0 ? tmp : (type2)0; } else { tmp_c[i * ldc + j] = tmp; } } } //! 
trans c to c4 basic_trans_mat_to_c8(tmp_c, c, ldc, m, n, false); free(tmp_c); } template <typename type, typename type2> static void basic_gemm(bool trans_a, bool trans_b, int m, int n, int k, type2 alpha, const type* a, int lda, const type* b, int ldb, type2 beta, type2* c, int ldc, const type2* bias, bool flag_bias = false, int flag_act = false, float six = 6.f, float leakey_relu_alpha = 1.f, float scale = 6.f, float offset = 3.f, float threshold = 6.f) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < m; ++i) { auto bias_data = static_cast<type2>(0); if (flag_bias) { bias_data = bias[i]; } for (int j = 0; j < n; ++j) { auto sum = static_cast<type2>(0); for (int l = 0; l < k; ++l) { type av; type bv; if (trans_a) { av = a[l * lda + i]; } else { av = a[i * lda + l]; } if (trans_b) { bv = b[j * ldb + l]; } else { bv = b[l * ldb + j]; } sum += av * bv; } type2 tmp = alpha * sum + beta * c[i * ldc + j] + bias_data; if (flag_act > 0) { if (flag_act == 1) { // relu c[i * ldc + j] = tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0); } else if (flag_act == 2) { // relu 6 c[i * ldc + j] = tmp > static_cast<type2>(0) ? tmp : static_cast<type2>(0); c[i * ldc + j] = c[i * ldc + j] < static_cast<type2>(six) ? c[i * ldc + j] : static_cast<type2>(six); } else if (flag_act == 4) { // leaky relu c[i * ldc + j] = tmp < static_cast<type2>(0) ? 
static_cast<type2>(tmp * leakey_relu_alpha) : tmp; } else if (flag_act == 10) { // hard swish auto tmp1 = tmp + offset; if (tmp1 > 0) { if (tmp1 < threshold) { c[i * ldc + j] = static_cast<type2>(tmp1 * tmp * 1.0 / scale); } else { c[i * ldc + j] = static_cast<type2>(threshold * tmp * 1.0 / scale); } } else { if (threshold > 0) { c[i * ldc + j] = static_cast<type2>(0); } else { c[i * ldc + j] = static_cast<type2>(threshold * tmp * 1.0 / scale); } } } } else { c[i * ldc + j] = tmp; } } } } template <typename type, typename type2> static void basic_gemv(int m, int k, const type* a, const type* b, const type2* bias, type2* c, type2 alpha, type2 beta, bool trans_a = false, bool flag_bias = false, int flag_act = false, float six = 6.f, float leakey_relu_alpha = 1.f, float scale = 6.f, float offset = 3.f, float threshold = 6.f) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for #endif for (int i = 0; i < m; ++i) { auto bias_data = static_cast<type2>(0); if (flag_bias) { bias_data = bias[i]; } auto sum = static_cast<type2>(0); for (int j = 0; j < k; ++j) { type av; if (trans_a) { av = a[j * m + i]; } else { av = a[i * k + j]; } sum += av * b[j]; } type2 tmp = alpha * sum + beta * c[i] + bias_data; if (flag_act > 0) { if (flag_act == 1) { // relu c[i] = tmp > (type2)0 ? tmp : (type2)0; } else if (flag_act == 2) { // relu 6 c[i] = tmp > (type2)0 ? tmp : (type2)0; c[i] = c[i] < six ? c[i] : six; // ut compute } else if (flag_act == 4) { // leakey relu c[i] = tmp < (type2)0 ? (type2)(tmp * leakey_relu_alpha) : tmp; } else if (flag_act == 10) { // hard_swish c[i] = std::min(static_cast<type2>(threshold), std::max(static_cast<type2>(0), static_cast<type2>(tmp + offset))) * static_cast<type2>(tmp * 1.0 / scale); } } else { c[i] = tmp; } } } /** * \brief basic direct convolution function */ //! for float, dtype1 and type2 is float //! for int8, dytpe1 is char, dtype2 is int //! attention! 
you need to clean output memory especially using float type template <typename Dtype1, typename Dtype2> static void conv_basic(const Dtype1* din, Dtype2* dout, int num, int chout, int hout, int wout, int chin, int hin, int win, const Dtype1* weights, const Dtype2* bias, int group, int kernel_w, int kernel_h, int stride_w, int stride_h, int dila_w, int dila_h, int pad_w, int pad_h, bool flag_bias, int act_type, float six = 6.f, float scale = 1.f, const float hard_scale = 6.f, const float offset = 3.f, const float threshold = 6.f) { Dtype2 beta = 0; auto src_data = din; auto dst_data_ref = dout; auto weights_data = weights; auto with_bias = flag_bias; auto bias_data = bias; int in_num = num; int out_channels = chout; int out_h = hout; int out_w = wout; int in_channel = chin; int in_h = hin; int in_w = win; int out_c_group = out_channels / group; int in_c_group = in_channel / group; for (int n = 0; n < in_num; ++n) { #ifdef PADDLE_WITH_MKLML #pragma omp parallel for collapse(4) #endif for (int g = 0; g < group; ++g) { for (int oc = 0; oc < out_c_group; ++oc) { for (int oh = 0; oh < out_h; ++oh) { for (int ow = 0; ow < out_w; ++ow) { int out_idx = n * group * out_c_group * out_h * out_w + g * out_c_group * out_h * out_w + oc * out_h * out_w + oh * out_w + ow; Dtype2 bias_d = with_bias ? 
(bias_data[g * out_c_group + oc]) : 0; dst_data_ref[out_idx] = bias_d + dst_data_ref[out_idx] * beta; for (int ic = 0; ic < in_c_group; ++ic) { for (int kh = 0; kh < kernel_h; ++kh) { for (int kw = 0; kw < kernel_w; ++kw) { int iw = ow * stride_w - pad_w + kw * (dila_w); int ih = oh * stride_h - pad_h + kh * (dila_h); if (iw < 0 || iw >= in_w) continue; if (ih < 0 || ih >= in_h) continue; int iidx = n * in_channel * in_h * in_w + g * in_c_group * in_h * in_w + ic * in_h * in_w + ih * in_w + iw; int widx = g * out_c_group * in_c_group * kernel_h * kernel_w + oc * in_c_group * kernel_h * kernel_w + ic * kernel_h * kernel_w + kh * kernel_w + kw; dst_data_ref[out_idx] += src_data[iidx] * weights_data[widx]; } } } if (act_type > 0) { // 1-relu 2-relu6 4-leakyrelu if (act_type == 1) { dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0 ? dst_data_ref[out_idx] : (Dtype2)0; } else if (act_type == 2) { dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0 ? dst_data_ref[out_idx] : (Dtype2)0; dst_data_ref[out_idx] = dst_data_ref[out_idx] < (Dtype2)six ? dst_data_ref[out_idx] : (Dtype2)six; } else if (act_type == 4) { dst_data_ref[out_idx] = dst_data_ref[out_idx] > (Dtype2)0 ? 
dst_data_ref[out_idx] : (Dtype2)(dst_data_ref[out_idx] * scale); } else if (act_type == 10) { auto tmp = dst_data_ref[out_idx] + offset; auto tmp1 = dst_data_ref[out_idx] * 1.0 / hard_scale; if (tmp > 0) { if (tmp < threshold) { dst_data_ref[out_idx] = static_cast<Dtype2>(tmp * tmp1); } else { dst_data_ref[out_idx] = static_cast<Dtype2>(threshold * tmp1); } } else { if (threshold > 0) { dst_data_ref[out_idx] = static_cast<Dtype2>(0); } else { dst_data_ref[out_idx] = static_cast<Dtype2>(threshold * tmp1); } } } else { printf("this act type: %d does not support \n", act_type); } } } } } } } } template <typename Dtype> static void fill_bias_relu(Dtype* tensor, const Dtype* bias, int channel, int channel_size, bool flag_bias, bool flag_relu) { Dtype* data = tensor; for (int j = 0; j < channel; ++j) { Dtype bias_c = flag_bias ? bias[j] : 0; for (int i = 0; i < channel_size; i++) { data[i] += bias_c; if (flag_relu) { data[i] = data[i] > 0 ? data[i] : 0.f; } } data += channel_size; } } template <typename Dtype> static void do_relu(Dtype* tensor, int size) { for (int j = 0; j < size; ++j) { tensor[j] = tensor[j] > 0 ? 
tensor[j] : (Dtype)0; } } inline bool is_a_ge_zero_and_a_lt_b(int a, int b) { return static_cast<unsigned>(a) < static_cast<unsigned>(b); } template <typename Dtype> static void col2im(const Dtype* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h0, const int pad_h1, const int pad_w0, const int pad_w1, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_im) { memset(data_im, 0, height * width * channels * sizeof(Dtype)); const int output_h = (height + pad_h0 + pad_h1 - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; const int output_w = (width + pad_w0 + pad_w1 - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; const int channel_size = height * width; for (int channel = channels; channel--; data_im += channel_size) { for (int kernel_row = 0; kernel_row < kernel_h; kernel_row++) { for (int kernel_col = 0; kernel_col < kernel_w; kernel_col++) { int input_row = -pad_h0 + kernel_row * dilation_h; for (int output_rows = output_h; output_rows; output_rows--) { if (!is_a_ge_zero_and_a_lt_b(input_row, height)) { data_col += output_w; } else { int input_col = -pad_w0 + kernel_col * dilation_w; for (int output_col = output_w; output_col; output_col--) { if (is_a_ge_zero_and_a_lt_b(input_col, width)) { data_im[input_row * width + input_col] += *data_col; } data_col++; input_col += stride_w; } } input_row += stride_h; } } } } } //! for float, dtype1 and type2 is float //! 
for int8, dytpe1 is char, dtype2 is int template <typename Dtype1, typename Dtype2> void deconv_basic(const Dtype1* din, Dtype2* dout, int num, int chout, int hout, int wout, int chin, int hin, int win, const Dtype1* weights, const Dtype2* bias, int group, int kernel_w, int kernel_h, int stride_w, int stride_h, int dila_w, int dila_h, int pad_w0, int pad_w1, int pad_h0, int pad_h1, bool flag_bias, bool flag_relu) { int m = chout * kernel_w * kernel_h / group; int n = hin * win; int k = chin / group; int group_size_in = win * hin * chin / group; int group_size_coldata = m * n; int group_size_weights = chin * chout * kernel_w * kernel_h / (group * group); bool flag_1x1s1p1 = (kernel_w == 1) && (kernel_h == 1) && (stride_h == 1) && (stride_w == 1) && (pad_w0 == 0) && (pad_h0 == 0) && (pad_w1 == 0) && (pad_h1 == 0) && (dila_w == 1) && (dila_h == 1); Dtype2* workspace_ptr = static_cast<Dtype2*>(malloc(sizeof(float) * m * n * group)); for (int i = 0; i < num; ++i) { const Dtype1* din_batch = din + i * chin * hin * win; Dtype2* dout_batch = dout + i * chout * hout * wout; Dtype2* col_data = workspace_ptr; if (flag_1x1s1p1) { col_data = dout_batch; } memset(col_data, 0, sizeof(Dtype2) * group_size_coldata * group); for (int g = 0; g < group; ++g) { const Dtype1* din_group = din_batch + g * group_size_in; const Dtype1* weights_group = weights + g * group_size_weights; Dtype2* coldata_group = col_data + g * group_size_coldata; basic_gemm<Dtype1, Dtype2>(true, false, m, n, k, 1, weights_group, m, din_group, n, 0, coldata_group, n, nullptr, false, false); } if (!flag_1x1s1p1) { col2im(col_data, chout, hout, wout, kernel_h, kernel_w, pad_h0, pad_h1, pad_w0, pad_w1, stride_h, stride_w, dila_h, dila_w, dout_batch); } //! 
add bias if (flag_bias || flag_relu) { fill_bias_relu( dout_batch, bias, chout, wout * hout, flag_bias, flag_relu); } } free(workspace_ptr); } float deformable_bilinear(const float* bottom_data, const int data_width, const int height, const int width, float h, float w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; if (h_low >= height - 1) { h_high = h_low = height - 1; h = static_cast<float>(h_low); } else { h_high = h_low + 1; } if (w_low >= width - 1) { w_high = w_low = width - 1; w = static_cast<float>(w_low); } else { w_high = w_low + 1; } float lh = h - h_low; float lw = w - w_low; float hh = 1 - lh; float hw = 1 - lw; float v1 = bottom_data[h_low * data_width + w_low]; float v2 = bottom_data[h_low * data_width + w_high]; float v3 = bottom_data[h_high * data_width + w_low]; float v4 = bottom_data[h_high * data_width + w_high]; float w1 = hh * hw; float w2 = hh * lw; float w3 = lh * hw; float w4 = lh * lw; float val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } //! for float, dtype1 and type2 is float //! 
for int8, dtype1 is char, dtype2 is int
// Naive reference implementation of deformable convolution (v1 and v2).
//
//   out(n, oc, oh, ow) += sum over (ic, fh, fw) of
//       weights(oc, ic, fh, fw) *
//       bilinear(in, window_origin + tap_offset + learned_offset) [* mask]
//
// - offset_data holds one (dy, dx) pair per kernel tap per output position.
// - mask_data (deformable conv v2, `modulated == true`) holds one scalar
//   per kernel tap per output position.
// - NOTE: the result is ACCUMULATED into out_data (plus bias), so the
//   caller must zero-initialize the output buffer.
template <typename Dtype1, typename Dtype2>
void deformable_conv_basic(const Dtype1* in_data,
                           const float* offset_data,
                           const float* mask_data,
                           Dtype2* out_data,
                           int num,
                           int chout,
                           int hout,
                           int wout,
                           int chin,
                           int hin,
                           int win,
                           const Dtype1* weights,
                           const Dtype2* bias,
                           int group,
                           int kernel_w,
                           int kernel_h,
                           int stride_w,
                           int stride_h,
                           int dila_w,
                           int dila_h,
                           int pad_w,
                           int pad_h,
                           bool flag_bias,
                           bool flag_relu,
                           bool modulated) {
  int out_c_group = chout / group;
  int in_c_group = chin / group;
  int in_size = hin * win;
  int out_size = hout * wout;
  int c_in_size = chin * in_size;
  int c_out_size = chout * out_size;
  int kernel_size = kernel_w * kernel_h;
  for (int n = 0; n < num; n++) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(4)
#endif
    for (int g = 0; g < group; ++g) {
      for (int oc = 0; oc < out_c_group; ++oc) {
        for (int oh = 0; oh < hout; oh++) {
          for (int ow = 0; ow < wout; ow++) {
            int out_idx = n * c_out_size + g * out_c_group * out_size +
                          oc * out_size + oh * wout + ow;
            Dtype2 bias_d = flag_bias ? bias[g * out_c_group + oc] : 0;
            // Accumulate on top of the caller-provided buffer.
            out_data[out_idx] = bias_d + out_data[out_idx];
            for (int ic = 0; ic < in_c_group; ++ic) {
              for (int fh = 0; fh < kernel_h; fh++) {
                for (int fw = 0; fw < kernel_w; fw++) {
                  // Per-group offset plane: 2 floats (dy, dx) per tap per
                  // output position.
                  const float* offset_data_ptr =
                      offset_data + n * group * 2 * kernel_size * out_size +
                      g * 2 * kernel_size * out_size;
                  const int data_offset_h_ptr =
                      ((2 * (fh * kernel_w + fw)) * hout + oh) * wout + ow;
                  const int data_offset_w_ptr =
                      ((2 * (fh * kernel_w + fw) + 1) * hout + oh) * wout + ow;
                  const float offset_h = offset_data_ptr[data_offset_h_ptr];
                  const float offset_w = offset_data_ptr[data_offset_w_ptr];
                  // BUGFIX: the sampling point depends on the kernel TAP
                  // INDEX (fh, fw), not the kernel EXTENT
                  // (kernel_h, kernel_w) as the previous code had it.
                  const float iw =
                      ow * stride_w - pad_w + fw * dila_w + offset_w;
                  const float ih =
                      oh * stride_h - pad_h + fh * dila_h + offset_h;
                  if (ih >= 0 && ih < hin && iw >= 0 && iw < win) {
                    // Sample position relative to the top-left corner of
                    // the receptive-field window (same fix applied).
                    const float map_h = fh * dila_h + offset_h;
                    const float map_w = fw * dila_w + offset_w;
                    const int cur_height = hin - (oh * stride_h - pad_h);
                    const int cur_width = win - (ow * stride_w - pad_w);
                    const float* in_data_offset =
                        in_data + n * c_in_size +
                        (g * in_c_group + ic) * in_size +
                        (oh * stride_h - pad_h) * win +
                        (ow * stride_w - pad_w);
                    float val = deformable_bilinear(in_data_offset,
                                                    win,
                                                    cur_height,
                                                    cur_width,
                                                    map_h,
                                                    map_w);
                    if (modulated) {
                      // Deformable conv v2: scale the sample by its mask.
                      const float* mask_ptr =
                          mask_data + n * group * kernel_size * out_size +
                          g * kernel_size * out_size +
                          (fh * kernel_w + fw) * hout * wout + oh * wout + ow;
                      val *= mask_ptr[0];
                    }
                    int widx = g * out_c_group * in_c_group * kernel_size +
                               oc * in_c_group * kernel_size +
                               ic * kernel_size + fh * kernel_w + fw;
                    out_data[out_idx] += val * weights[widx];
                  }
                }
              }
            }
            if (flag_relu) {
              out_data[out_idx] =
                  out_data[out_idx] > 0 ? out_data[out_idx] : 0;
            }
          }
        }
      }
    }
  }
}
DRB064-outeronly2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Only the outmost loop can be parallelized. The inner loop has loop carried true data dependence. However, the loop is not parallelized so no race condition. */ #include <omp.h> double b[100][100]; #define N 100 int init() { int i; int j; int k; #pragma omp parallel for private (i,j) for (i = 0; i <= 99; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= 99; j += 1) { b[i][j] = (i * j); } } return 0; } void foo(int n,int m) { int i; int j; #pragma omp parallel for private (i,j) firstprivate (n,m) for (i = 0; i <= n - 1; i += 1) { // Be careful about bounds of j for (j = 1; j <= m - 1; j += 1) { b[i][j] = b[i][j - 1]; } } } int print() { int i; int j; int k; for (i = 0; i <= 99; i += 1) { for (j = 0; j <= 99; j += 1) { printf("%lf\n",b[i][j]); } } return 0; } int main() { init(); foo(100,100); print(); return 0; }
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(1,ceild(24*t2-Nz+9,4)),3*t1+1),6*t1-6*t2+2);t3<=min(min(min(floord(4*Nt+Ny-9,4),floord(12*t1+Ny+15,4)),floord(24*t2+Ny+11,4)),floord(24*t1-24*t2+Nz+Ny+13,4));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-62,64)),ceild(3*t1-126,128)),ceild(24*t2-Nz-499,512)),ceild(4*t3-Ny-499,512));t4<=min(min(min(min(floord(4*Nt+Nx-9,512),floord(12*t1+Nx+15,512)),floord(24*t2+Nx+11,512)),floord(4*t3+Nx-9,512)),floord(24*t1-24*t2+Nz+Nx+13,512));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(512*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),t3-1),128*t4+126);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(512*t4,4*t5+4); ubv=min(512*t4+511,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ 
free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
stream.c
/*-----------------------------------------------------------------------*/ /* Program: STREAM */ /* Revision: $Id: stream.c,v 5.10 2013/01/17 16:01:06 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2013: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear, and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. 
Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <unistd.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /*----------------------------------------------------------------------- * INSTRUCTIONS: * * 1) STREAM requires different amounts of memory to run on different * systems, depending on both the system cache size(s) and the * granularity of the system timer. * You should adjust the value of 'STREAM_ARRAY_SIZE' (below) * to meet *both* of the following criteria: * (a) Each array must be at least 4 times the size of the * available cache memory. I don't worry about the difference * between 10^6 and 2^20, so in practice the minimum array size * is about 3.8 times the cache size. * Example 1: One Xeon E3 with 8 MB L3 cache * STREAM_ARRAY_SIZE should be >= 4 million, giving * an array size of 30.5 MB and a total memory requirement * of 91.5 MB. * Example 2: Two Xeon E5's with 20 MB L3 cache each (using OpenMP) * STREAM_ARRAY_SIZE should be >= 20 million, giving * an array size of 153 MB and a total memory requirement * of 458 MB. * (b) The size should be large enough so that the 'timing calibration' * output by the program is at least 20 clock-ticks. * Example: most versions of Windows have a 10 millisecond timer * granularity. 20 "ticks" at 10 ms/tic is 200 milliseconds. * If the chip is capable of 10 GB/s, it moves 2 GB in 200 msec. * This means the each array must be at least 1 GB, or 128M elements. * * Version 5.10 increases the default array size from 2 million * elements to 10 million elements in response to the increasing * size of L3 caches. The new default size is large enough for caches * up to 20 MB. 
* Version 5.10 changes the loop index variables from "register int" * to "ssize_t", which allows array indices >2^32 (4 billion) * on properly configured 64-bit systems. Additional compiler options * (such as "-mcmodel=medium") may be required for large memory runs. * * Array size can be set at compile time without modifying the source * code for the (many) compilers that support preprocessor definitions * on the compile line. E.g., * gcc -O -DSTREAM_ARRAY_SIZE=100000000 stream.c -o stream.100M * will override the default size of 10M with a new size of 100M elements * per array. */ #ifndef STREAM_ARRAY_SIZE # define STREAM_ARRAY_SIZE 1000000 #endif /* 2) STREAM runs each kernel "NTIMES" times and reports the *best* result * for any iteration after the first, therefore the minimum value * for NTIMES is 2. * There are no rules on maximum allowable values for NTIMES, but * values larger than the default are unlikely to noticeably * increase the reported performance. * NTIMES can also be set on the compile line without changing the source * code using, for example, "-DNTIMES=7". */ #ifdef NTIMES #if NTIMES<=1 # define NTIMES 10 #endif #endif #ifndef NTIMES # define NTIMES 10 #endif /* Users are allowed to modify the "OFFSET" variable, which *may* change the * relative alignment of the arrays (though compilers may change the * effective offset by making the arrays non-contiguous on some systems). * Use of non-zero values for OFFSET can be especially helpful if the * STREAM_ARRAY_SIZE is set to a value close to a large power of 2. * OFFSET can also be set on the compile line without changing the source * code using, for example, "-DOFFSET=56". */ #ifndef OFFSET # define OFFSET 0 #endif /* * 3) Compile the code with optimization. Many compilers generate * unreasonably bad code before the optimizer tightens things up. * If the results are unreasonably good, on the other hand, the * optimizer might be too smart for me! 
* * For a simple single-core version, try compiling with: * cc -O stream.c -o stream * This is known to work on many, many systems.... * * To use multiple cores, you need to tell the compiler to obey the OpenMP * directives in the code. This varies by compiler, but a common example is * gcc -O -fopenmp stream.c -o stream_omp * The environment variable OMP_NUM_THREADS allows runtime control of the * number of threads/cores used when the resulting "stream_omp" program * is executed. * * To run with single-precision variables and arithmetic, simply add * -DSTREAM_TYPE=float * to the compile line. * Note that this changes the minimum array sizes required --- see (1) above. * * The preprocessor directive "TUNED" does not do much -- it simply causes the * code to call separate functions to execute each kernel. Trivial versions * of these functions are provided, but they are *not* tuned -- they just * provide predefined interfaces to be replaced with tuned code. * * * 4) Optional: Mail the results to mccalpin@cs.virginia.edu * Be sure to include info that will help me understand: * a) the computer hardware configuration (e.g., processor model, memory type) * b) the compiler name/version and compilation flags * c) any run-time information (such as OMP_NUM_THREADS) * d) all of the output from the test case. * * Thanks! 
* *-----------------------------------------------------------------------*/ # define HLINE "-------------------------------------------------------------\n" # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif #ifndef STREAM_TYPE #define STREAM_TYPE double #endif static STREAM_TYPE a[STREAM_ARRAY_SIZE+OFFSET], b[STREAM_ARRAY_SIZE+OFFSET], c[STREAM_ARRAY_SIZE+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 2 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE, 3 * sizeof(STREAM_TYPE) * STREAM_ARRAY_SIZE }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(STREAM_TYPE scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(STREAM_TYPE scalar); #endif #ifdef _OPENMP extern int omp_get_num_threads(); #endif int main() { int quantum, checktick(); int BytesPerWord; int k; ssize_t j; STREAM_TYPE scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); printf("STREAM version $Revision: 5.10 $\n"); printf(HLINE); BytesPerWord = sizeof(STREAM_TYPE); printf("This system uses %d bytes per array element.\n", BytesPerWord); printf(HLINE); #ifdef N printf("***** WARNING: ******\n"); printf(" It appears that you set the preprocessor variable N when compiling this code.\n"); printf(" This version of the code uses the preprocesor variable STREAM_ARRAY_SIZE to control the array size\n"); printf(" Reverting to default value of STREAM_ARRAY_SIZE=%llu\n",(unsigned long long) STREAM_ARRAY_SIZE); printf("***** WARNING: ******\n"); #endif printf("Array size = %llu (elements), Offset = %d (elements)\n" , (unsigned long long) STREAM_ARRAY_SIZE, 
OFFSET); printf("Memory per array = %.1f MiB (= %.1f GiB).\n", BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0), BytesPerWord * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.0/1024.0)); printf("Total memory required = %.1f MiB (= %.1f GiB).\n", (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024.), (3.0 * BytesPerWord) * ( (double) STREAM_ARRAY_SIZE / 1024.0/1024./1024.)); printf("Each kernel will be executed %d times.\n", NTIMES); printf(" The *best* time for each kernel (excluding the first iteration)\n"); printf(" will be used to compute the reported bandwidth.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel { #pragma omp master { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } } #endif #ifdef _OPENMP k = 0; #pragma omp parallel #pragma omp atomic k++; printf ("Number of Threads counted = %i\n",k); #endif /* Get initial value for system clock. */ #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) { a[j] = 1.0; b[j] = 2.0; c[j] = 0.0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else { printf("Your clock granularity appears to be " "less than one microsecond.\n"); quantum = 1; } t = mysecond(); #pragma omp parallel for for (j = 0; j < STREAM_ARRAY_SIZE; j++) a[j] = 2.0E0 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3.0; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #ifdef TUNED 
tuned_STREAM_Copy(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]; #endif times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); #ifdef TUNED tuned_STREAM_Scale(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) b[j] = scalar*c[j]; #endif times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #ifdef TUNED tuned_STREAM_Add(); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) c[j] = a[j]+b[j]; #endif times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #ifdef TUNED tuned_STREAM_Triad(scalar); #else #pragma omp parallel for for (j=0; j<STREAM_ARRAY_SIZE; j++) a[j] = b[j]+scalar*c[j]; #endif times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MIN(mintime[j], times[j][k]); maxtime[j] = MAX(maxtime[j], times[j][k]); } } printf("Function Best Rate MB/s Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%12.1f %11.6f %11.6f %11.6f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MIN(minDelta, MAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. 
*/
#include <sys/time.h>

/* Wall-clock time in seconds (gettimeofday wrapper); microsecond resolution.
 * Used for all kernel timing in this benchmark. */
double mysecond()
{
    struct timeval tp;
    struct timezone tzp;
    int i;

    /* return status of gettimeofday is captured but intentionally unused */
    i = gettimeofday(&tp,&tzp);
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}

/* abs() works on STREAM_TYPE (may be float/double), so a macro is used
 * instead of the integer abs() from <stdlib.h>. */
#ifndef abs
#define abs(a) ((a) >= 0 ? (a) : -(a))
#endif

/*
 * Validate the final contents of a[], b[], c[] by replaying the exact same
 * initialization and NTIMES kernel iterations on scalars (aj, bj, cj), then
 * comparing the average absolute error per array against a type-dependent
 * epsilon.  Prints a per-array error report; prints a success line only if
 * all three arrays validate.
 */
void checkSTREAMresults ()
{
    STREAM_TYPE aj,bj,cj,scalar;
    STREAM_TYPE aSumErr,bSumErr,cSumErr;
    STREAM_TYPE aAvgErr,bAvgErr,cAvgErr;
    double epsilon;
    ssize_t j;
    int k,ierr,err;

    /* reproduce initialization */
    aj = 1.0;
    bj = 2.0;
    cj = 0.0;
    /* a[] is modified during timing check */
    aj = 2.0E0 * aj;
    /* now execute timing loop -- scalar replay of Copy/Scale/Add/Triad */
    scalar = 3.0;
    for (k=0; k<NTIMES; k++)
    {
        cj = aj;
        bj = scalar*cj;
        cj = aj+bj;
        aj = bj+scalar*cj;
    }

    /* accumulate deltas between observed and expected results */
    aSumErr = 0.0;
    bSumErr = 0.0;
    cSumErr = 0.0;
    for (j=0; j<STREAM_ARRAY_SIZE; j++) {
        aSumErr += abs(a[j] - aj);
        bSumErr += abs(b[j] - bj);
        cSumErr += abs(c[j] - cj);
        // if (j == 417) printf("Index 417: c[j]: %f, cj: %f\n",c[j],cj); // MCCALPIN
    }
    aAvgErr = aSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    bAvgErr = bSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;
    cAvgErr = cSumErr / (STREAM_TYPE) STREAM_ARRAY_SIZE;

    /* tolerance scales with the width of STREAM_TYPE (float vs double) */
    if (sizeof(STREAM_TYPE) == 4) {
        epsilon = 1.e-6;
    }
    else if (sizeof(STREAM_TYPE) == 8) {
        epsilon = 1.e-13;
    }
    else {
        printf("WEIRD: sizeof(STREAM_TYPE) = %lu\n",sizeof(STREAM_TYPE));
        epsilon = 1.e-6;
    }

    err = 0;
    /* array a[] check; on failure, count and (optionally) list bad elements */
    if (abs(aAvgErr/aj) > epsilon) {
        err++;
        printf ("Failed Validation on array a[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",aj,aAvgErr,abs(aAvgErr)/aj);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(a[j]/aj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array a: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,aj,a[j],abs((aj-a[j])/aAvgErr));
                }
#endif
            }
        }
        printf(" For array a[], %d errors were found.\n",ierr);
    }
    /* array b[] check */
    if (abs(bAvgErr/bj) > epsilon) {
        err++;
        printf ("Failed Validation on array b[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",bj,bAvgErr,abs(bAvgErr)/bj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(b[j]/bj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array b: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,bj,b[j],abs((bj-b[j])/bAvgErr));
                }
#endif
            }
        }
        printf(" For array b[], %d errors were found.\n",ierr);
    }
    /* array c[] check */
    if (abs(cAvgErr/cj) > epsilon) {
        err++;
        printf ("Failed Validation on array c[], AvgRelAbsErr > epsilon (%e)\n",epsilon);
        printf (" Expected Value: %e, AvgAbsErr: %e, AvgRelAbsErr: %e\n",cj,cAvgErr,abs(cAvgErr)/cj);
        printf (" AvgRelAbsErr > Epsilon (%e)\n",epsilon);
        ierr = 0;
        for (j=0; j<STREAM_ARRAY_SIZE; j++) {
            if (abs(c[j]/cj-1.0) > epsilon) {
                ierr++;
#ifdef VERBOSE
                if (ierr < 10) {
                    printf(" array c: index: %ld, expected: %e, observed: %e, relative error: %e\n", j,cj,c[j],abs((cj-c[j])/cAvgErr));
                }
#endif
            }
        }
        printf(" For array c[], %d errors were found.\n",ierr);
    }
    if (err == 0) {
        printf ("Solution Validates: avg error less than %e on all three arrays\n",epsilon);
    }
#ifdef VERBOSE
    printf ("Results Validation Verbose Results: \n");
    printf (" Expected a(1), b(1), c(1): %f %f %f \n",aj,bj,cj);
    printf (" Observed a(1), b(1), c(1): %f %f %f \n",a[1],b[1],c[1]);
    printf (" Rel Errors on a, b, c: %e %e %e \n",abs(aAvgErr/aj),abs(bAvgErr/bj),abs(cAvgErr/cj));
#endif
}

#ifdef TUNED
/* stubs for "tuned" versions of the kernels */
/* Each stub currently mirrors the reference kernel; replace with a
 * platform-tuned implementation when building with -DTUNED. */
void tuned_STREAM_Copy()
{
    ssize_t j;
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        c[j] = a[j];
}

void tuned_STREAM_Scale(STREAM_TYPE scalar)
{
    ssize_t j;
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        b[j] = scalar*c[j];
}

void tuned_STREAM_Add()
{
    ssize_t j;
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        c[j] = a[j]+b[j];
}

void tuned_STREAM_Triad(STREAM_TYPE scalar)
{
    ssize_t j;
#pragma omp parallel for
    for (j=0; j<STREAM_ARRAY_SIZE; j++)
        a[j] = b[j]+scalar*c[j];
}
/* end of stubs for the "tuned" versions of the kernels */
#endif
target.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: i) #pragma omp target map(present, alloc: i) ; // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target map(present, alloc: i) ; // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
detection.c
//for ssd output detection fprop //conf is a matrix with len*4*bs //loc is a matrix with len*num_class*bs //result is a matrix with bs #include <stdlib.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <mkl_trans.h> #include <omp.h> //self.bbox_transform_inv(prior_boxes, loc_view[:, :, k], self.proposals) void bbox_transform_inv( float* boxes, //boxes float* deltas, //location float* output, //output, num_boxes*4 const long num_boxes) { for(long i=0; i<num_boxes; ++i) { const float var0 = 0.1; const float var1 = 0.1; const float var2 = 0.2; const float var3 = 0.2; const long index = i * 4; const float widths = boxes[index+2] - boxes[index]; const float heights = boxes[index+3] - boxes[index+1]; const float ctr_x = boxes[index] + 0.5 * widths; const float ctr_y = boxes[index+1] + 0.5 * heights; const float dx = deltas[index]; const float dy = deltas[index+1]; const float dw = deltas[index+2]; const float dh = deltas[index+3]; const float pred_ctr_x = var0 * dx * widths + ctr_x; const float pred_ctr_y = var1 * dy * heights + ctr_y; const float pred_w = exp(var2 * dw) * widths; const float pred_h = exp(var3 * dh) * heights; output[index] = pred_ctr_x - 0.5 * pred_w; output[index+1] = pred_ctr_y - 0.5 * pred_h; output[index+2] = pred_ctr_x + 0.5 * pred_w; output[index+3] = pred_ctr_y + 0.5 * pred_h; } } void softmax(float* input, int N, long long len) { float* inPtr = (float*)input; for (int i = 0; i < N; ++i) { float *pTemp = inPtr + i * len; float pMax = pTemp[0]; for(long long j = 0; j < len; ++j) { if (pMax < pTemp[j]) { pMax = pTemp[j]; } } float pSum = 0.0f; for(long long j=0; j<len; ++j) { pTemp[j] = exp(pTemp[j] - pMax); pSum += pTemp[j]; } for(long long j=0; j < len; ++j) { pTemp[j] = pTemp[j] / pSum; } } } //find first N out of length long get_top_N_index( float* scores, const long N, const long length, const float threshold, long* index) { for(long i=0; i<length; ++i) index[i] = i; long num = (length<N) ? 
length:N; for(long i=0; i<num; ++i) { //find max score and store in index[i] for(long j=i+1; j<length; ++j) { if(scores[i]<scores[j]) { float temp = scores[i]; scores[i] = scores[j]; scores[j] = temp; long temp_index = index[i]; index[i] = index[j]; index[j] = temp_index; } } } if(threshold>0) { long i = 0; for( ; i<num; ++i) { if(scores[i]<=threshold) break; } return i; } return num; } long nms(float* detection, long* index_sort, const float threshold, int normalized, const long N) { float offset = (normalized) ? 0:1; int* out = malloc(N*sizeof(int)); //record swapped out choice float* area_vec = malloc(N*sizeof(float)); for(long i=0; i<N; ++i) out[i] = 0; long i = 0; for(; i<N; ++i) { const float score = detection[i*5+4]; if(score<=0) break; const float x1 = detection[i*5]; const float y1 = detection[i*5+1]; const float x2 = detection[i*5+2]; const float y2 = detection[i*5+3]; area_vec[i] = (x2 - x1 + offset) * (y2 - y1 + offset); } long positive_len = i; //non-zero scores 0 ~ positive_len-1 long result_len = 0; i = 0; while(i<positive_len) { if(out[i]) //this choice is kicked out { i++; continue; } index_sort[result_len] = i; result_len++; for(long j=i+1; j<positive_len; ++j) { if(out[j]) continue; const float x11 = detection[i*5]; const float y11 = detection[i*5+1]; const float x12 = detection[i*5+2]; const float y12 = detection[i*5+3]; const float x21 = detection[j*5]; const float y21 = detection[j*5+1]; const float x22 = detection[j*5+2]; const float y22 = detection[j*5+3]; const float xx1 = (x11>x21) ? x11:x21; const float yy1 = (y11>y21) ? y11:y21; const float xx2 = (x12<x22) ? x12:x22; const float yy2 = (y12<y22) ? y12:y22; float w = xx2 - xx1 + offset; float h = yy2 - yy1 + offset; w = ( w > 0) ? w : 0; h = ( h > 0) ? 
h : 0; const float inter = w * h; const float ovr = inter / (area_vec[i] + area_vec[j] - inter); if (ovr > threshold) out[j] = 1; } i++; } free(out); free(area_vec); return result_len; } long detection_fprop( float* conf, //score for each class for each box, num_box * num_class * bs float* loc, //location for each box, box * 4 * bs float* res_detection, //final memory restoring boxes, bs * top_k float* prior_boxes, //num_boxes * 4 long * res_batch_len, //record count of result for each batch, bs const long num_boxes, //num_boxes, each is a potential object const long num_class, //number of class const long bs, //batch size const long nms_topk, //first top k box for nms result for each class const long image_topk, //first top k box for input image const float score_threshold, //threshold for accepting as a object for box const float nms_threshold) //threshold for two overlapped boxes, too overlapped is one object { //sorted result of index long* index_batch = malloc(bs*num_boxes*num_class*sizeof(long)); //scores to be sorted float* scores_batch = malloc(bs*num_boxes*num_class*sizeof(float)); //temp result detections for each batch, grow when iterating among classes float* temp_res_detection_batch = malloc(bs*num_class*nms_topk*6*sizeof(float)); //internal memory to restore sorted boxes for each class float* internal_detection_batch = malloc(bs*nms_topk*5*sizeof(float)); //internal memory to restore transformed location float* proposal_batch = malloc(bs*num_boxes*4*sizeof(float)); //transpose KLN to NKL float* conf_t = malloc(num_boxes * num_class * bs * sizeof(float)); float* loc_t = malloc(num_boxes * 4* bs * sizeof(float)); mkl_somatcopy('r', 't', num_boxes*num_class, bs, 1.0, conf, bs, conf_t, num_boxes*num_class); mkl_somatcopy('r', 't', num_boxes*4, bs, 1.0, loc, bs, loc_t, num_boxes*4); //loop for batch size #pragma omp parallel for for(long b=0; b<bs; ++b) //loop for batch { float* scores = scores_batch + b * num_boxes*num_class; float* temp_res_detection = 
temp_res_detection_batch + b * num_class*nms_topk*6; long* index = index_batch + b * num_boxes*num_class; float* internal_detection = internal_detection_batch + b * nms_topk*5; float* proposal = proposal_batch + b * num_boxes*4; //calculate class scores for this batch using softmax float* conf_batch = conf_t + b * num_boxes * num_class; softmax(conf_batch, num_boxes, num_class); //store scores in an array mkl_somatcopy('r', 't', num_boxes, num_class, 1.0, conf_batch, num_class, scores, num_boxes); //transform locations in proposal bbox_transform_inv(prior_boxes, loc_t + b * 4 * num_boxes, proposal, num_boxes); long res_len = 0; //count of feasible boxes for this image for(long c=1; c<num_class; ++c) //loop for classes { //for each class, sort out first nms_topk boxes, store result in index long sort_nums_res = get_top_N_index(scores + c*num_boxes, nms_topk, num_boxes, score_threshold, index); //store location and score for the sorted results if(sort_nums_res > 0) { //store location and score in internal_detection for overlapped check for(long i=0; i<sort_nums_res; ++i) { for(long j=0; j<4; ++j) internal_detection[i*5+j] = proposal[index[i]*4+j]; internal_detection[i*5+4] = scores[c*num_boxes+i]; } //remove overlapped box sort_nums_res = nms(internal_detection, index, nms_threshold, 1, sort_nums_res); //store result in temp memory and add class number, thus width is 6 for(long i=0; i<sort_nums_res; ++i) { float* temp = temp_res_detection + (res_len+i)*6; for(long j=0; j<5; ++j) { temp[j] = internal_detection[index[i]*5+j]; } //add class number temp[5] = c; } res_len += sort_nums_res; } } //sort out first top_k boxes for this image for(long i=0; i<res_len; ++i) { scores[i] = temp_res_detection[i*6+4]; index[i] = i; } long sort_nums_res = res_len; if(sort_nums_res>image_topk) //sort first top_k out of res_len { sort_nums_res = get_top_N_index(scores, image_topk, res_len, 0.0, index); } //store sorted result in final output float* temp = res_detection + b * image_topk 
* 6; for(long i=0; i<sort_nums_res; ++i) { for(long j=0; j<6; ++j) { temp[i*6+j] = temp_res_detection[index[i]*6+j]; } } res_batch_len[b] = sort_nums_res; } free(conf_t); free(loc_t); free(index_batch); free(scores_batch); free(temp_res_detection_batch); free(proposal_batch); free(internal_detection_batch); }
mesh-amit.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <sys/types.h>
#include <time.h>
#include <fftw3.h>
#include "globalvars.h"
#include "sharedvars.h"
#include "prototype.h"
#include "mesh.h"

/*****************************************************************************/
/*
 * Compute the outward unit normal at every bead of particle `ipart` as the
 * area-weighted average of the adjacent triangle normals; also caches each
 * triangle normal into the global normal_t array.
 *
 *   normal - per-triangle normals (input)
 *   nrm    - per-bead unit normals (output)
 *   ipart  - particle index into the global area/normal_t arrays
 *
 * Uses the globals: beads, ntriangles, triangle, area, normal_t.
 */
void init_normal_beads (double normal[NTRIANGLE_MAX][3], double nrm[beads][3], int ipart)
{
    double wx[beads], wy[beads], wz[beads];
    int i;
    int i1,i2,i3;
    int itriangle;
    double norm;

    /* Initialize weights for the global solution */
    for(i=0;i<beads;i++)
    {
        wx[i]=0.0;
        wy[i]=0.0;
        wz[i]=0.0;
    }

    /* Store weights for the global calculation */
    /* Also used for computing the area weighted normal average for each bead */
    /* each triangle contributes one third of its area to each of its 3 vertices */
    for(itriangle=0;itriangle<ntriangles;itriangle++)
    {
        i1 = triangle[itriangle][0];
        i2 = triangle[itriangle][1];
        i3 = triangle[itriangle][2];
        wx[i1] += area[ipart][itriangle]/3.0*normal[itriangle][0];
        wx[i2] += area[ipart][itriangle]/3.0*normal[itriangle][0];
        wx[i3] += area[ipart][itriangle]/3.0*normal[itriangle][0];
        wy[i1] += area[ipart][itriangle]/3.0*normal[itriangle][1];
        wy[i2] += area[ipart][itriangle]/3.0*normal[itriangle][1];
        wy[i3] += area[ipart][itriangle]/3.0*normal[itriangle][1];
        wz[i1] += area[ipart][itriangle]/3.0*normal[itriangle][2];
        wz[i2] += area[ipart][itriangle]/3.0*normal[itriangle][2];
        wz[i3] += area[ipart][itriangle]/3.0*normal[itriangle][2];
        normal_t[ipart][itriangle][0] = normal[itriangle][0];
        normal_t[ipart][itriangle][1] = normal[itriangle][1];
        normal_t[ipart][itriangle][2] = normal[itriangle][2];
    }

    /*Computing the area weighted normal average for each bead */
    for(i=0;i<beads;i++)
    {
        norm = wx[i]*wx[i] + wy[i]*wy[i] + wz[i]*wz[i];
        norm = sqrt(norm);
        nrm[i][0] = wx[i]/norm;
        nrm[i][1] = wy[i]/norm;
        nrm[i][2] = wz[i]/norm;
    }
}

/*****************************************************************************/
/*
 * Refine the per-bead normals and compute the mean curvature at every bead
 * by fitting a local quadratic surface z = f(x,y) to each bead's neighbors
 * in a frame aligned with the current normal, then iterating until the
 * fitted normal converges (change < 1e-3).
 *
 * First 12 beads (icosahedral vertices) have 5 neighbors: a square 5x5
 * system solved with LAPACK dgesv_.  The remaining beads have 6 neighbors:
 * an overdetermined 6x5 least-squares system solved with LAPACK dgelss_.
 * NOTE(review): A2 is declared 6x6 but only 5 columns are filled/used,
 * matching N=5 passed to dgelss_ with LDA=6 — presumably intentional.
 *
 *   nrm  - in/out: per-bead unit normals (refined in place)
 *   curv - out: per-bead curvature, -(f_xx + f_yy) of the local fit
 *   xb   - bead coordinates
 *   beads_to_beads - neighbor lists (5 or 6 valid entries per bead)
 */
void compute_normal_curvature (double nrm[beads][3], double curv[beads], double xb[beads][3], int beads_to_beads[beads][6])
{
    int i,j,k;
    int ibead,jbead;
    int i1,i2,i3;
    int itriangle;
    double norm;
    double xneigh[6][3];
    double x_tr[6][3];
    double Ct[3][3];
    double u[3],Q[3][3];
    double xp[3],yp[3],zp[3];
    double A1[5][5],brhs1[5],rdist1[5];
    double A2[6][6],brhs2[6],rdist2[6];
    int INFO, N, NRHS, IPIV[6],LDA, LDB, M, RANK;
    int LWORK=50;
    double WORK[50];
    double RCOND=1E-10;
    double S[5];
    double nrm_l[3],nrm_g[3];
    double error;
    FILE *fp;

    /*fp = fopen("nrm.txt","w+");
    for(ibead=0;ibead<beads;ibead++)
    {
    for(i=0;i<3;i++)
    {
    fprintf(fp,"%E\t",nrm[ibead][i]);
    }
    fprintf(fp,"\n");
    }
    fclose(fp);*/

    /* First for the first 12 vertices with coordination number 5 */
    for(ibead=0;ibead<12;ibead++)
    {
        /* store coordinates of the connected beads relative to ibead */
        for(i=0;i<5;i++)
        {
            jbead = beads_to_beads[ibead][i];
            for(j=0;j<3;j++)
            {
                xneigh[i][j] = xb[jbead][j]-xb[ibead][j];
            }
        }
        /* iterate the local fit until the normal stops changing */
        do
        {
            /* Store local z axis coordinates in global frame */
            zp[0] = nrm[ibead][0];
            zp[1] = nrm[ibead][1];
            zp[2] = nrm[ibead][2];
            /* find the rotation matrix: Use householder's algorithm */
            if (fabs(zp[2]-1) > 1E-10)
            {
                /* Householder reflector mapping zp onto the global z axis */
                u[0] = 0.0 - zp[0];
                u[1] = 0.0 - zp[1];
                u[2] = 1.0 - zp[2];
                norm = sqrt(u[0]*u[0]+u[1]*u[1]+u[2]*u[2]);
                /* normalize */
                u[0] = u[0]/norm;
                u[1] = u[1]/norm;
                u[2] = u[2]/norm;
                /* Find the Q matrix */
                for(i=0;i<3;i++)
                {
                    for(j=0;j<3;j++)
                    {
                        if (i==j)
                        {
                            Q[i][j] = 1.0 - 2.0*u[i]*u[j];
                        }
                        else
                        {
                            Q[i][j] = -2.0*u[i]*u[j];
                        }
                    }
                }
            }
            else // no rotation necessary Q=I
            {
                for(i=0;i<3;i++)
                {
                    for(j=0;j<3;j++)
                    {
                        if (i==j)
                        {
                            Q[i][j] = 1.0;
                        }
                        else
                        {
                            Q[i][j] = 0.0;
                        }
                    }
                }
            }
            /* find coordinates of the local x axis in global system = Q^T*(1,0,0) */
            xp[0] = Q[0][0];
            xp[1] = Q[1][0];
            xp[2] = Q[2][0];
            /* find yp = z * x*/
            cross_product(zp,xp,yp);
            /* compute the coordinate transformation matrix: C^T */
            for(i=0;i<3;i++)
            {
                Ct[0][i] = xp[i];
                Ct[1][i] = yp[i];
                Ct[2][i] = zp[i];
            }
            /* transform coordinates of neighboring beads to local coordinates */
            for(i=0;i<5;i++)
            {
                for(j=0;j<3;j++)
                {
                    x_tr[i][j] = 0.0;
                    for(k=0;k<3;k++)
                    {
                        x_tr[i][j] += Ct[j][k]*xneigh[i][k];
                    }
                }
            }
            /* Find the distance from the center */
            for(i=0;i<5;i++)
            {
                rdist1[i] = sqrt(x_tr[i][0]*x_tr[i][0] + x_tr[i][1]*x_tr[i][1] + x_tr[i][2]*x_tr[i][2]);
            }
            /* build the A matrix, store in fortran column major format */
            /* rows are weighted by 1/distance; unknowns: f_x, f_y, f_xx, f_xy, f_yy */
            for(i=0;i<5;i++)
            {
                A1[0][i] = x_tr[i][0]/rdist1[i];
                A1[1][i] = x_tr[i][1]/rdist1[i];
                A1[2][i] = x_tr[i][0]*x_tr[i][0]/rdist1[i];
                A1[3][i] = x_tr[i][0]*x_tr[i][1]/rdist1[i];
                A1[4][i] = x_tr[i][1]*x_tr[i][1]/rdist1[i];
                brhs1[i] = x_tr[i][2]/rdist1[i];
            }
            /* Solve using dgesv */
            N=5;NRHS=1;LDA=5;LDB=5;
            dgesv_(&N,&NRHS,A1,&LDA,IPIV,brhs1,&LDB,&INFO);
            /* find new normal in local coordinates */
            norm = sqrt(1.0 + brhs1[0]*brhs1[0] + brhs1[1]*brhs1[1]);
            nrm_l[0] = -brhs1[0]/norm;
            nrm_l[1] = -brhs1[1]/norm;
            nrm_l[2] = 1.0/norm;
            /* transform to global coordinates */
            for(i=0;i<3;i++)
            {
                nrm_g[i]=0.0;
                for(j=0;j<3;j++)
                {
                    nrm_g[i] += Ct[j][i]*nrm_l[j];
                }
            }
            /* find error */
            error=0.0;
            for(i=0;i<3;i++)
            {
                error += pow(zp[i]-nrm_g[i],2);
            }
            error = sqrt(error);
            /* store in normal array */
            for(i=0;i<3;i++)
            {
                nrm[ibead][i] = nrm_g[i];
            }
        } while (error > 1E-3);
        /* store curvature */
        curv[ibead] = -brhs1[2] - brhs1[4];
        // printf("%d\t%E\n",ibead,curv[ibead]);
    }

    /* Remaining vertices with coordination number 6 */
    for(ibead=12;ibead<beads;ibead++)
    {
        /* store coordinates of the connected beads relative to ibead */
        for(i=0;i<6;i++)
        {
            jbead = beads_to_beads[ibead][i];
            for(j=0;j<3;j++)
            {
                xneigh[i][j] = xb[jbead][j]-xb[ibead][j];
            }
        }
        do
        {
            /* Store zp axis coordinates */
            zp[0] = nrm[ibead][0];
            zp[1] = nrm[ibead][1];
            zp[2] = nrm[ibead][2];
            if (fabs(zp[2]-1) > 1E-10)
            {
                u[0] = 0.0 - zp[0];
                u[1] = 0.0 - zp[1];
                u[2] = 1.0 - zp[2];
                norm = sqrt(u[0]*u[0]+u[1]*u[1]+u[2]*u[2]);
                /* normalize */
                u[0] = u[0]/norm;
                u[1] = u[1]/norm;
                u[2] = u[2]/norm;
                /* Find the Q matrix */
                for(i=0;i<3;i++)
                {
                    for(j=0;j<3;j++)
                    {
                        if (i==j)
                        {
                            Q[i][j] = 1.0 - 2.0*u[i]*u[j];
                        }
                        else
                        {
                            Q[i][j] = -2.0*u[i]*u[j];
                        }
                    }
                }
            }
            else // no rotation necessary Q=I
            {
                for(i=0;i<3;i++)
                {
                    for(j=0;j<3;j++)
                    {
                        if (i==j)
                        {
                            Q[i][j] = 1.0;
                        }
                        else
                        {
                            Q[i][j] = 0.0;
                        }
                    }
                }
            }
            /* find coordinates of xp axis in global system */
            xp[0] = Q[0][0];
            xp[1] = Q[1][0];
            xp[2] = Q[2][0];
            /* find yp = z * x*/
            cross_product(zp,xp,yp);
            /* compute the coordinate transformation matrix: C^T */
            for(i=0;i<3;i++)
            {
                Ct[0][i] = xp[i];
                Ct[1][i] = yp[i];
                Ct[2][i] = zp[i];
            }
            /* transform coordinates of neighboring beads to local coordinates */
            for(i=0;i<6;i++)
            {
                for(j=0;j<3;j++)
                {
                    x_tr[i][j] = 0.0;
                    for(k=0;k<3;k++)
                    {
                        x_tr[i][j] += Ct[j][k]*xneigh[i][k];
                    }
                }
            }
            /* Find the distance from the center */
            for(i=0;i<6;i++)
            {
                rdist2[i] = sqrt(x_tr[i][0]*x_tr[i][0] + x_tr[i][1]*x_tr[i][1] + x_tr[i][2]*x_tr[i][2]);
            }
            /* build the A matrix, store in fortran column major format */
            for(i=0;i<6;i++)
            {
                A2[0][i] = x_tr[i][0]/rdist2[i];
                A2[1][i] = x_tr[i][1]/rdist2[i];
                A2[2][i] = x_tr[i][0]*x_tr[i][0]/rdist2[i];
                A2[3][i] = x_tr[i][0]*x_tr[i][1]/rdist2[i];
                A2[4][i] = x_tr[i][1]*x_tr[i][1]/rdist2[i];
                brhs2[i] = x_tr[i][2]/rdist2[i];
            }
            /* Solve using dgelss */
            M=6;N=5;NRHS=1;LDA=M;LDB=M;
            dgelss_(&M,&N,&NRHS,A2,&LDA,brhs2,&LDB,S,&RCOND,&RANK,WORK,&LWORK,&INFO);
            /* find new normal in local coordinates */
            norm = sqrt(1.0 + brhs2[0]*brhs2[0] + brhs2[1]*brhs2[1]);
            nrm_l[0] = -brhs2[0]/norm;
            nrm_l[1] = -brhs2[1]/norm;
            nrm_l[2] = 1.0/norm;
            /* transform to global coordinates */
            for(i=0;i<3;i++)
            {
                nrm_g[i]=0.0;
                for(j=0;j<3;j++)
                {
                    nrm_g[i] += Ct[j][i]*nrm_l[j];
                }
            }
            /* find error */
            error=0.0;
            for(i=0;i<3;i++)
            {
                error += pow(zp[i]-nrm_g[i],2);
            }
            error = sqrt(error);
            /* store in normal array */
            for(i=0;i<3;i++)
            {
                nrm[ibead][i] = nrm_g[i];
            }
        } while (error > 1E-3);
        /* store curvature */
        curv[ibead] = -brhs2[2] - brhs2[4];
        // printf("%d\t%E\n",ibead,curv[ibead]);
    }

    /*fp = fopen("nrm1.txt","w+");
    for(ibead=0;ibead<beads;ibead++)
    {
    for(i=0;i<3;i++)
    {
    fprintf(fp,"%E\t",nrm[ibead][i]);
    }
    fprintf(fp,"\n");
    }
    fclose(fp);
    exit(0);*/
}

/*****************************************************************************/
/*-------------------------------------------------------------
 *
Computes the solid angle subtended by a triangle at * one of its own vertex (see ieee transactions of biomedical * engineering, 45, 980, 1998) - *------------------------------------------------------------*/ void solid_angle(double *Omega, double x1[3],double x2[3], double x3[3], double n[3]) { int i,j,k; double pi; double x[3],y[3]; double z[3]; double ndotx,ndoty,ndotz,xdoty; double xm,ym; double nr,dr; pi = 4.0*atan(1.0); /* find sides of the triangle relative to the origin vertex */ for(i=0;i<3;i++) { x[i]=x2[i]-x1[i]; y[i]=x3[i]-x1[i]; } cross_product(x,y,z); xm = sqrt(dot_product(3,x,x)); ym = sqrt(dot_product(3,y,y)); ndotx = dot_product(3,n,x); ndoty = dot_product(3,n,y); ndotz = dot_product(3,n,z); xdoty = dot_product(3,x,y); nr = -2.0*ndotz*(ndotx*ym + ndoty*xm); dr = pow(xm*ym + xdoty,2) - pow(ndotx*ym + ndoty*xm,2) + pow(ndotz,2); *Omega = atan(nr/dr); } /******************************************************************************/ /*----------------------------------------------------------------------------------- uin = coefficient of the Green's function uout = output of the double layer integral -------------------------------------------------------------------------------------*/ void prvec_double_layer(double xb[npart][beads][3],double uin[npart][beads][3], double nrm[npart][beads][3], double ub[npart][beads][3]) { int i,j,k,ibead; int itriangle,jtriangle; double xi[3]; int i1,i2,i3; double u[3],u1[3]; double w[npart][beads]; double uxf[nx][ny][nz], uyf[nx][ny][nz], uzf[nx][ny][nz],pf[nx][ny][nz]; double ubc1[nx][ny][3], ubc2[nx][ny][3]; double gaussx[nx][ny][nz],gaussy[nx][ny][nz],gaussz[nx][ny][nz],gaussz_p[nx][ny][nz]; FILE *fp; char name[20]; double fb[beads][3]; double sigma_g[nx][ny][nz][3][3]; double pi; double uint[beads][3]; int ix,iy,iz; double dx,dy; int member; int ipart,jpart; double Omega; double dudx[nx][ny][nz],dudy[nx][ny][nz],dudz[nx][ny][nz]; double dvdx[nx][ny][nz],dvdy[nx][ny][nz],dvdz[nx][ny][nz]; double 
dwdx[nx][ny][nz],dwdy[nx][ny][nz],dwdz[nx][ny][nz]; pi = 4.0*atan(1.0); /* Mesh size */ dx = Lx/nx; dy = Ly/ny; /* Initialize velocity to zero */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<beads;i++) { for(j=0;j<3;j++) { ub[ipart][i][j]=0.0; } } } /* Initialize weights for the global solution */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<beads;i++) { w[ipart][i]=0.0; } } /* Store weights for the global calculation */ for(ipart=0;ipart<npart;ipart++) { for(itriangle=0;itriangle<ntriangles;itriangle++) { i1 = triangle[itriangle][0]; i2 = triangle[itriangle][1]; i3 = triangle[itriangle][2]; w[ipart][i1] += area[ipart][itriangle]/3.0; w[ipart][i2] += area[ipart][itriangle]/3.0; w[ipart][i3] += area[ipart][itriangle]/3.0; } } #if (OMP == 1) omp_set_num_threads(NTHREADS); #endif /* Sum contribution from the self element at each of the nodes*/ /* Also find the weight for each node for the global solution computaiton */ #if (npart > 1) #if (OMP == 1) #pragma omp parallel for private(ipart,jpart,ibead,j,itriangle,jtriangle,xi,i1,i2,i3,member,u,Omega) #endif #endif for(ipart=0;ipart<npart;ipart++) { #if (npart == 1) #if (OMP == 1) #pragma omp parallel for private(jpart,ibead,j,itriangle,jtriangle,xi,i1,i2,i3,member,u,Omega) #endif #endif for(ibead=0;ibead<beads;ibead++) { /* Store position of the field point */ for(i=0;i<3;i++) { xi[i] = xb[ipart][ibead][i]; } /* compute velocity at this point due to all triangular elements */ for(itriangle=0;itriangle<triangle_count[ipart][ibead];itriangle++) { jpart = triangle_list[ipart][ibead][itriangle][0]; jtriangle = triangle_list[ipart][ibead][itriangle][1]; i1 = triangle[jtriangle][0]; i2 = triangle[jtriangle][1]; i3 = triangle[jtriangle][2]; member=0; // printf("%lf\n",area[triangle_list[ibead][itriangle]]); if (ibead == i1 && jpart == ipart) { member=1; } else if (ibead == i2 && jpart == ipart) { i2=i1; i1 = ibead; member=1; } else if (ibead == i3 && jpart == ipart) { i3=i1; i1=ibead; member=1; } /* Perform the integration */ 
if (member==0) { integrate5(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2],uin[jpart][i3], u,alpha,nrm[ipart][ibead]); } else { if (SOLID_ANGLE == 1) { /* compute solid angle */ solid_angle(&Omega,xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],nrm[ipart][ibead]); integrate5c(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1], uin[jpart][i2],uin[jpart][i3],u,alpha,normal_t[ipart][jtriangle],Omega); } else { /* using normal of the triangle as the normal of ibead, this makes it non-singular */ integrate5a(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2], uin[jpart][i3],u,alpha,nrm[ipart][ibead],normal_t[ipart][jtriangle]); } } /* Sum to total velocity */ for(i=0;i<3;i++) { ub[ipart][ibead][i] += u[i]; } } } } /*----------------------- Global Solution Calculation ------------------------------*/ /*----- Boundary Conditions ---- */ /* Initialize */ for(i=0;i<nx;i++) { for(j=0;j<ny;j++) { for(k=0;k<3;k++) { ubc1[i][j][k]=0.0; ubc2[i][j][k]=0.0; } } } /* set ubc = - u_l for poiseuille flow */ /* Top plate */ #if (OMP == 1) #pragma omp parallel for private(ix,iy,xi,itriangle,jpart,jtriangle,i1,i2,i3,member,u) #endif for(ix=0;ix<nx;ix++) { for(iy=0;iy<ny;iy++) { xi[0] = ix*dx; xi[1] = iy*dy; xi[2] = Lz; for(itriangle=0;itriangle<triangle_count_t[ix][iy];itriangle++) { jpart = triangle_list_t[ix][iy][itriangle][0]; jtriangle = triangle_list_t[ix][iy][itriangle][1]; i1 = triangle[jtriangle][0]; i2 = triangle[jtriangle][1]; i3 = triangle[jtriangle][2]; member=0; // performing non-singular integral as of now if (member == 1) { integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2],uin[jpart][i3],u,alpha); } else { integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2],uin[jpart][i3],u,alpha); } for(i=0;i<3;i++) { ubc2[ix][iy][i] -= u[i]/8/pi; } } } } /* Bottom plate */ #if (OMP == 1) #pragma omp parallel for 
private(ix,iy,xi,itriangle,jpart,jtriangle,i1,i2,i3,member,u) #endif for(ix=0;ix<nx;ix++) { for(iy=0;iy<ny;iy++) { xi[0] = ix*dx; xi[1] = iy*dy; xi[2] = 0.0; for(itriangle=0;itriangle<triangle_count_b[ix][iy];itriangle++) { jpart = triangle_list_b[ix][iy][itriangle][0]; jtriangle = triangle_list_b[ix][iy][itriangle][1]; i1 = triangle[jtriangle][0]; i2 = triangle[jtriangle][1]; i3 = triangle[jtriangle][2]; member=0; // performing non-singular integral as of now if (member == 1) { integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2],uin[jpart][i3],u,alpha); } else { integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,uin[jpart][i1],uin[jpart][i2],uin[jpart][i3],u,alpha); } for(i=0;i<3;i++) { ubc1[ix][iy][i] -= u[i]/8/pi; } } } } /* Distribute density to mesh */ /* Initialize mesh force density */ for(ix=0;ix<nx;ix++) { for(iy=0;iy<ny;iy++) { for(iz=0;iz<nz;iz++) { gaussx[ix][iy][iz] = 0.0; gaussy[ix][iy][iz] = 0.0; gaussz[ix][iy][iz] = 0.0; gaussz_p[ix][iy][iz] = 0.0; } } } for(ipart=0;ipart<npart;ipart++) { distribute_density(xb[ipart],uin[ipart],gaussx,gaussy,gaussz,gaussz_p,w[ipart],alpha,xz); } /* solve for the velocity and pressure at the mesh points */ global_velocity_inhomogeneous(gaussx,gaussy,gaussz,gaussz_p,ubc1,ubc2,uxf,uyf,uzf,pf, dudx,dudy,dudz,dvdx,dvdy,dvdz,dwdx,dwdy,dwdz, xxi,cz, u1H,v1H,w1H,dwdz1H,dudz1H,dvdz1H,p1H, u2H,v2H,w2H,dwdz2H,dudz2H,dvdz2H,p2H); /* Compute the global stress tensor at the mesh points */ #if (STRESS_HI == 1) compute_global_stress_tensor_spectral(dudx,dudy,dudz,dvdx,dvdy,dvdz, dwdx,dwdy,dwdz,pf,sigma_g); #else compute_global_stress_tensor(uxf,uyf,uzf,pf,sigma_g,xz); #endif /* Store first column of sigma in uxf, uyf, uzf to interpolate */ for (i=0;i<nx;i++) { for(j=0;j<ny;j++) { for(k=0;k<nz;k++) { uxf[i][j][k] = sigma_g[i][j][k][0][0]; uyf[i][j][k] = sigma_g[i][j][k][1][0]; uzf[i][j][k] = sigma_g[i][j][k][2][0]; } } } /* Interpolate velocity from the mesh to the bead position: store in uint 
*/
/* NOTE(review): tail of a routine whose opening lies above this chunk.
   It appears to interpolate the grid field staged in uxf/uyf/uzf to the bead
   positions and accumulate into ub weighted by the first normal component —
   confirm against the (unseen) function header. */
for(ipart=0;ipart<npart;ipart++)
{
    /* reset the per-bead interpolation accumulator */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            uint[ibead][i]=0.0;
        }
    }
#if (INTERP == 2)
    interpolate2(xb[ipart],uint,uxf,uyf,uzf,xz);
#else
    interpolate4(xb[ipart],uint,uxf,uyf,uzf,xz);
#endif
    /* multiply by nrm[ibead][0] */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            ub[ipart][ibead][i] += uint[ibead][i]*nrm[ipart][ibead][0];
        }
    }
}
/* Store second column of sigma in uxf, uyf, uzf to interpolate */
for (i=0;i<nx;i++)
{
    for(j=0;j<ny;j++)
    {
        for(k=0;k<nz;k++)
        {
            uxf[i][j][k] = sigma_g[i][j][k][0][1];
            uyf[i][j][k] = sigma_g[i][j][k][1][1];
            uzf[i][j][k] = sigma_g[i][j][k][2][1];
        }
    }
}
/* Interpolate velocity from the mesh to the bead position: store in uint */
for(ipart=0;ipart<npart;ipart++)
{
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            uint[ibead][i]=0.0;
        }
    }
#if (INTERP == 2)
    interpolate2(xb[ipart],uint,uxf,uyf,uzf,xz);
#else
    interpolate4(xb[ipart],uint,uxf,uyf,uzf,xz);
#endif
    /* multiply by nrm[ibead][1] */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            ub[ipart][ibead][i] += uint[ibead][i]*nrm[ipart][ibead][1];
        }
    }
}
/* Store third column of sigma in uxf, uyf, uzf to interpolate */
for(i=0;i<nx;i++)
{
    for(j=0;j<ny;j++)
    {
        for(k=0;k<nz;k++)
        {
            uxf[i][j][k] = sigma_g[i][j][k][0][2];
            uyf[i][j][k] = sigma_g[i][j][k][1][2];
            uzf[i][j][k] = sigma_g[i][j][k][2][2];
        }
    }
}
/* Interpolate velocity from the mesh to the bead position: store in uint */
for(ipart=0;ipart<npart;ipart++)
{
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            uint[ibead][i]=0.0;
        }
    }
#if (INTERP == 2)
    interpolate2(xb[ipart],uint,uxf,uyf,uzf,xz);
#else
    interpolate4(xb[ipart],uint,uxf,uyf,uzf,xz);
#endif
    /* multiply by nrm[ibead][2] */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            ub[ipart][ibead][i] += uint[ibead][i]*nrm[ipart][ibead][2];
        }
    }
}
}

/******************************************************************************/
/* prvec: computes the velocity ub at every bead of every particle as the sum
   of (a) a locally integrated boundary-integral contribution from nearby
   triangles and (b) a smooth "global" solution obtained by distributing the
   force density to a regular mesh, solving for the mesh velocity, and
   interpolating back to the beads (a particle-mesh / Ewald-like split).
   Inputs:  xb  — bead positions, fb — bead force densities,
            nrm_b — bead normals (used for singularity subtraction).
   Output:  ub — bead velocities (overwritten).
   Relies on file-scope globals: triangle connectivity/lists, normal_t, area,
   Lx/Ly/Lz, nx/ny/nz, alpha, xz, and the 1H/2H homogeneous-solution tables. */
void prvec(double xb[npart][beads][3], double fb[npart][beads][3], double ub[npart][beads][3], double nrm_b[npart][beads][3])
{
    int i,j,k;
    int ibead,itriangle,jtriangle;
    double xi[3];
    double r1;
    int i1,i2,i3;
    double pi;
    double u[3];
    int member;
    double weights[npart][beads];
    double uxf[nx][ny][nz], uyf[nx][ny][nz], uzf[nx][ny][nz], pf[nx][ny][nz];
    double ubc1[nx][ny][3], ubc2[nx][ny][3];
    double gaussx[nx][ny][nz],gaussy[nx][ny][nz],gaussz[nx][ny][nz],gaussz_p[nx][ny][nz];
    double dudx[nx][ny][nz],dudy[nx][ny][nz],dudz[nx][ny][nz];
    double dvdx[nx][ny][nz],dvdy[nx][ny][nz],dvdz[nx][ny][nz];
    double dwdx[nx][ny][nz],dwdy[nx][ny][nz],dwdz[nx][ny][nz];
    FILE *fp;
    char name[20];
    int ix,iy,iz;
    double dx,dy;
    int ipart,jpart;
    double fn,f1[3],f2[3],f3[3];

    pi = 4.0*atan(1.0);

    /* Mesh size */
    dx = Lx/nx;
    dy = Ly/ny;

    /* Initialize velocity to zero */
    for(ipart=0;ipart<npart;ipart++)
    {
        for(i=0;i<beads;i++)
        {
            for(j=0;j<3;j++)
            {
                ub[ipart][i][j]=0.0;
            }
        }
    }

#if (OMP == 1)
    omp_set_num_threads(NTHREADS);
#endif

    /* Contribution from local force density.
       The parallel loop is placed over particles when npart > 1, otherwise
       over beads (npart == 1), so OpenMP always has enough work items. */
#if (npart > 1)
#if (OMP == 1)
#pragma omp parallel for private(ipart,jpart,ibead,j,itriangle,jtriangle,xi,i1,i2,i3,member,u)
#endif
#endif
    for(ipart=0;ipart<npart;ipart++)
    {
#if (npart == 1)
#if (OMP == 1)
#pragma omp parallel for private(jpart,ibead,j,itriangle,jtriangle,xi,i1,i2,i3,member,u)
#endif
#endif
        for(ibead=0;ibead<beads;ibead++)
        {
            for(j=0;j<3;j++)
            {
                xi[j] = xb[ipart][ibead][j];
            }
#if (SING_SUBT == 1)
            /* normal traction at the target bead, used to subtract the
               singular part of the kernel below */
            fn = fb[ipart][ibead][0]*nrm_b[ipart][ibead][0] + fb[ipart][ibead][1]*nrm_b[ipart][ibead][1] + fb[ipart][ibead][2]*nrm_b[ipart][ibead][2];
#endif
            for(itriangle=0;itriangle<triangle_count[ipart][ibead];itriangle++)
            {
                jpart = triangle_list[ipart][ibead][itriangle][0];
                jtriangle = triangle_list[ipart][ibead][itriangle][1];
                i1 = triangle[jtriangle][0];
                i2 = triangle[jtriangle][1];
                i3 = triangle[jtriangle][2];
                member=0;
                // printf("%lf\n",area[triangle_list[ibead][itriangle]]);
                /* if the target bead is a vertex of this triangle, reorder the
                   vertices so the singular vertex is first and use the
                   singular quadrature (integrate2) */
                if (ibead == i1 && ipart == jpart)
                {
                    member=1;
                }
                else if (ibead == i2 && jpart == ipart)
                {
                    i2=i1;
                    i1 = ibead;
                    member=1;
                }
                else if (ibead == i3 && jpart == ipart)
                {
                    i3=i1;
                    i1=ibead;
                    member=1;
                }
#if (SING_SUBT == 0 )
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
#else
                /* singularity subtraction: remove the normal component of the
                   traction projected on the triangle normal before integrating */
                for(j=0;j<3;j++)
                {
                    f1[j] = fb[jpart][i1][j] - fn*normal_t[jpart][jtriangle][j];
                    f2[j] = fb[jpart][i2][j] - fn*normal_t[jpart][jtriangle][j];
                    f3[j] = fb[jpart][i3][j] - fn*normal_t[jpart][jtriangle][j];
                }
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,f1,f2,f3,u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,f1,f2,f3,u,alpha);
                }
#endif
                for(i=0;i<3;i++)
                {
                    ub[ipart][ibead][i] += u[i];
                }
            }
        }
    }
    /* fp = fopen("U.txt","w+");
    for(ipart=0;ipart<npart;ipart++)
    {
    for(ibead=0;ibead<beads;ibead++)
    {
    fprintf(fp,"%d\t%E\t%E\t%E\n",ibead,ub[ipart][ibead][0],ub[ipart][ibead][0],ub[ipart][ibead][0]);
    }
    }
    fclose(fp);
    exit(0);*/
    // return;

    /*---------------- Global solution calculation -----------------*/

    /*----- Boundary Conditions ---- */
    /* Initialize */
    for(i=0;i<nx;i++)
    {
        for(j=0;j<ny;j++)
        {
            for(k=0;k<3;k++)
            {
                ubc1[i][j][k]=0.0;
                ubc2[i][j][k]=0.0;
            }
        }
    }

    /* set ubc = - u_l for poiseuille flow */
    /* Top plate: cancel the local (singular) velocity on the wall z = Lz so
       the global mesh solve sees homogeneous wall conditions */
#if (OMP == 1)
#pragma omp parallel for private(ix,iy,xi,itriangle,jpart,jtriangle,i1,i2,i3,member,u)
#endif
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            xi[0] = ix*dx;
            xi[1] = iy*dy;
            xi[2] = Lz;
            for(itriangle=0;itriangle<triangle_count_t[ix][iy];itriangle++)
            {
                jpart = triangle_list_t[ix][iy][itriangle][0];
                jtriangle = triangle_list_t[ix][iy][itriangle][1];
                i1 = triangle[jtriangle][0];
                i2 = triangle[jtriangle][1];
                i3 = triangle[jtriangle][2];
                member=0;
                // performing non-singular integral as of now
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                for(i=0;i<3;i++)
                {
                    ubc2[ix][iy][i] -= u[i]/8/pi;   /* 1/(8*pi) Stokeslet prefactor */
                }
            }
        }
    }

    /* bottom plate (z = 0), same cancellation as the top plate */
#if (OMP == 1)
#pragma omp parallel for private(ix,iy,xi,itriangle,jpart,jtriangle,i1,i2,i3,member,u)
#endif
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            xi[0] = ix*dx;
            xi[1] = iy*dy;
            xi[2] = 0.0;
            for(itriangle=0;itriangle<triangle_count_b[ix][iy];itriangle++)
            {
                jpart = triangle_list_b[ix][iy][itriangle][0];
                jtriangle = triangle_list_b[ix][iy][itriangle][1];
                i1 = triangle[jtriangle][0];
                i2 = triangle[jtriangle][1];
                i3 = triangle[jtriangle][2];
                member=0;
                // performing non-singular integral as of now
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                for(i=0;i<3;i++)
                {
                    ubc1[ix][iy][i] -= u[i]/8/pi;
                }
            }
        }
    }

    /* Find weights for the global solution */
    /* Initialize weights for the global solution */
    for(ipart=0;ipart<npart;ipart++)
    {
        for(i=0;i<beads;i++)
        {
            weights[ipart][i]=0.0;
        }
    }
    /* each vertex carries 1/3 of the area of every triangle it belongs to */
    for(ipart=0;ipart<npart;ipart++)
    {
        for(itriangle=0;itriangle<ntriangles;itriangle++)
        {
            i1 = triangle[itriangle][0];
            i2 = triangle[itriangle][1];
            i3 = triangle[itriangle][2];
            /* Store weights for the global calculation */
            weights[ipart][i1] += area[ipart][itriangle]/3.0;
            weights[ipart][i2] += area[ipart][itriangle]/3.0;
            weights[ipart][i3] += area[ipart][itriangle]/3.0;
        }
    }

    /* Distribute density to mesh */
    /* Initialize mesh force density */
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            for(iz=0;iz<nz;iz++)
            {
                gaussx[ix][iy][iz] = 0.0;
                gaussy[ix][iy][iz] = 0.0;
                gaussz[ix][iy][iz] = 0.0;
                gaussz_p[ix][iy][iz] = 0.0;
            }
        }
    }
    for(ipart=0;ipart<npart;ipart++)
    {
        distribute_density(xb[ipart],fb[ipart],gaussx,gaussy,gaussz,gaussz_p,weights[ipart],alpha,xz);
    }
    /* printf("Here\n");
    fp =fopen("rho.txt","w+");
    for(ix=0;ix<nx;ix++)
    {
    for(iy=0;iy<ny;iy++)
    {
    for(iz=0;iz<nz;iz++)
    {
    fprintf(fp,"%E\t%E\t%E\t%E\n",gaussx[ix][iy][iz],gaussy[ix][iy][iz],gaussz[ix][iy][iz],gaussz_p[ix][iy][iz]);
    }
    }
    }
    fclose(fp);
    */

    /* solve for the velocity and pressure at the mesh points */
    global_velocity_inhomogeneous(gaussx,gaussy,gaussz,gaussz_p,ubc1,ubc2,uxf,uyf,uzf,pf, dudx,dudy,dudz,dvdx,dvdy,dvdz,dwdx,dwdy,dwdz, xxi,cz, u1H,v1H,w1H,dwdz1H,dudz1H,dvdz1H,p1H, u2H,v2H,w2H,dwdz2H,dudz2H,dvdz2H,p2H);

    /* fp =fopen("Vel.txt","w+");
    for(iz=0;iz<nz;iz++)
    {
    fprintf(fp,"%E\t%E\t%E\t%E\n",xz[iz],uxf[nx/3][ny/3][iz],uyf[nx/3][ny/3][iz],uzf[nx/3][ny/3][iz]);
    }
    fclose(fp);
    exit(0);
    */

    /* Interpolate velocity from the mesh to the bead position */
    for(ipart=0;ipart<npart;ipart++)
    {
#if (INTERP == 2)
        interpolate2(xb[ipart],ub[ipart],uxf,uyf,uzf,xz);
#else
        interpolate4(xb[ipart],ub[ipart],uxf,uyf,uzf,xz);
#endif
    }
    /*fp = fopen("U.txt","w+");
    for(ipart=0;ipart<npart;ipart++)
    {
    for(ibead=0;ibead<beads;ibead++)
    {
    fprintf(fp,"%d\t%E\t%E\t%E\n",ibead,ub[ipart][ibead][0],ub[ipart][ibead][1],ub[ipart][ibead][2]);
    }
    }
    fclose(fp);
    exit(0);*/
}

/*------------------------------------------------------------------
Find the particle pairs which overlap or violate the minimum gap
specification
-------------------------------------------------------------------*/
/* find_overlaps_gap: scans the precomputed candidate pair list (ovp_list)
   and marks particle pairs that overlap (signed gap < 0, detected via the
   normal-direction dot product) or come closer than GAP_MIN.
   Outputs: overlap_pairs — symmetric 0/1 pair flags,
            min_gap — most negative/smallest signed gap per pair,
            *overlaps — total number of violating bead pairs found. */
void find_overlaps_gap(double xb[npart][beads][3], double nrm_b[npart][beads][3],int overlap_pairs[npart][npart], double min_gap[npart][npart],int *overlaps)
{
    int ipart,jpart;
    int ibead,jbead;
    int i,j;
    double xi[3],xj[3];
    double x,y,z,r;
    double xc,yc,zc;
    double dotp;

    *overlaps=0;
    for(ipart=0;ipart<npart;ipart++)
    {
        for(jpart=0;jpart<npart;jpart++)
        {
            overlap_pairs[ipart][jpart]=0;
            min_gap[ipart][jpart]=GAP_MIN;
        }
    }
    for(i=0;i<ovp_pair_count;i++)
    {
        ipart = ovp_list[i][0];
        ibead = ovp_list[i][1];
        jpart = ovp_list[i][2];
        jbead = ovp_list[i][3];
        for(j=0;j<3;j++)
        {
            xi[j] = xb[ipart][ibead][j];
            xj[j] = xb[jpart][jbead][j];
        }
        // difference vector
        x = xi[0]-xj[0];
        y = xi[1]-xj[1];
        z = xi[2]-xj[2];
        /* correct for periodicity (periodic in x and y only) */
        x = x - Lx*floor(x/Lx+0.5);
        y = y - Ly*floor(y/Ly+0.5);
        r = sqrt(x*x+y*y+z*z);
        /* find dot_product to check overlap: a negative projection of the
           separation on the neighbour's outward normal means bead i is on
           the inside of particle j's surface */
        dotp = (x*nrm_b[jpart][jbead][0] + y*nrm_b[jpart][jbead][1] + z*nrm_b[jpart][jbead][2])/r;
        if (dotp < 0.0) // overlaps
        {
            overlap_pairs[ipart][jpart]=1;
            overlap_pairs[jpart][ipart]=1;
            /* store penetration as a negative gap */
            if ( -r < min_gap[ipart][jpart])
            {
                min_gap[ipart][jpart]=-r;
                min_gap[jpart][ipart]=-r;
            }
            *overlaps = *overlaps + 1;
        }
        else if (r < GAP_MIN)
        {
            overlap_pairs[ipart][jpart]=1;
            overlap_pairs[jpart][ipart]=1;
            if ( r < min_gap[ipart][jpart])
            {
                min_gap[ipart][jpart]=r;
                min_gap[jpart][ipart]=r;
            }
            *overlaps = *overlaps + 1;
        }
    }
}

/*------------------------------------------------------------------
Find the particle pairs which overlap or violate the minimum gap
specification
-------------------------------------------------------------------*/
/* find_repulsive_force: converts the pairwise gap violations reported by
   find_overlaps_gap into center-of-mass repulsive force pairs FR.
   The magnitude (1 - GAP_MIN/gap) vanishes when the gap reaches GAP_MIN;
   sign_x/sign_y flip the direction when the pair interacts through a
   periodic image in x or y. */
void find_repulsive_force(double xcm[npart][3],int overlap_pairs[npart][npart], double min_gap[npart][npart], double FR[npart][3])
{
    int ipart,jpart;
    int i,j;
    double xi[3],xj[3];
    double x,y,z,r;
    double num_min_gap=0.005; // numerical minimum gap
    double gap;
    double sign_x,sign_y;

    /* Initialize repulsive force to zero */
    for(ipart=0;ipart<npart;ipart++)
    {
        for(i=0;i<3;i++)
        {
            FR[ipart][i]=0.0;
        }
    }
    for(ipart=0;ipart<npart-1;ipart++)
    {
        for(jpart=ipart+1;jpart<npart;jpart++)
        {
            if (overlap_pairs[ipart][jpart] != 0)
            {
                for(i=0;i<3;i++)
                {
                    xi[i] = xcm[ipart][i];
                    xj[i] = xcm[jpart][i];
                }
                // difference vector
                x = xi[0]-xj[0];
                y = xi[1]-xj[1];
                z = xi[2]-xj[2];
                r = sqrt(x*x+y*y+z*z);
                gap = min_gap[ipart][jpart];
                /* set a numerical minimum gap to bound GAP_MIN/gap */
                if (gap < num_min_gap)
                {
                    gap = num_min_gap;
                }
                /* correct for periodicity */
                if (fabs(x) > Lx/2)
                {
                    sign_x = -1.0;
                }
                else
                {
                    sign_x = 1.0;
                }
                if (fabs(y) > Ly/2)
                {
                    sign_y = -1.0;
                }
                else
                {
                    sign_y = 1.0;
                }
                /* equal and opposite forces along the line of centers
                   NOTE(review): FR[jpart] is assigned (=) rather than
                   accumulated (+=); with more than one simultaneous
                   overlap per particle only the last pair survives —
                   confirm this is intended. */
                FR[jpart][0] = (1.0 - GAP_MIN/gap)*x/r*sign_x;
                FR[jpart][1] = (1.0 - GAP_MIN/gap)*y/r*sign_y;
                FR[jpart][2] = (1.0 - GAP_MIN/gap)*z/r;
                FR[ipart][0] = -FR[jpart][0];
                FR[ipart][1] = -FR[jpart][1];
                FR[ipart][2] = -FR[jpart][2];
            }
        }
    }
}

/* -------------------------------------------------------------------
 * pushes the particle apart without rotating them to correct the
 * overlaps
--------------------------------------------------------------------*/
/* correct_overlaps: iteratively translates whole particles (beads and
   center of mass move rigidly together) along repulsive forces until no
   gap violations remain or 1000 iterations have been performed.
   Side effects: updates the global overlap_indicator[] on the first
   iteration and appends a summary line to "overlap.txt". */
void correct_overlaps(double xb[npart][beads][3], double nrm_b[npart][beads][3], double xcm[npart][3])
{
    int ipart,ibead,jpart;
    int i,j;
    int overlaps,overlaps0;
    double FR[npart][3];
    int overlap_pairs[npart][npart];
    double min_gap[npart][npart];
    double xcm_new[npart][3];
    double REX=0.001;   /* displacement step size per iteration */
    int itr_count=0;
    FILE *fp;

    do
    {
        /* find the minimum gap violations */
        find_overlaps_gap(xb,nrm_b,overlap_pairs,min_gap, &overlaps);
        if (itr_count == 0)
        {
            overlaps0 = overlaps;
            /* set overlap indicator */
            for(ipart=0;ipart<npart;ipart++)
            {
                overlap_indicator[ipart]=0;
                for(jpart=0;jpart<npart;jpart++)
                {
                    overlap_indicator[ipart] += overlap_pairs[ipart][jpart];
                }
            }
        }
        if (overlaps == 0 || itr_count > 1000)
        {
            break;
        }
        else
        {
#if (PRINT==1)
            printf("Number of overlaps = %d\t initial = %d\t iteration count = %d\n ",overlaps,overlaps0,itr_count);
#endif
        }
        /* find the repulsive force */
        find_repulsive_force(xcm,overlap_pairs,min_gap,FR);
        /* move the particles */
        for(ipart=0;ipart<npart;ipart++)
        {
            for(i=0;i<3;i++)
            {
                xcm_new[ipart][i] = xcm[ipart][i] + REX*FR[ipart][i];
            }
        }
        /* find new xb: translate every bead by the same CM displacement */
        for(ipart=0;ipart<npart;ipart++)
        {
            for(ibead=0;ibead<beads;ibead++)
            {
                for(i=0;i<3;i++)
                {
                    xb[ipart][ibead][i] = xb[ipart][ibead][i] + (xcm_new[ipart][i] - xcm[ipart][i]);
                }
            }
        }
        /* update center of mass */
        for(ipart=0;ipart<npart;ipart++)
        {
            for(i=0;i<3;i++)
            {
                xcm[ipart][i] = xcm_new[ipart][i];
            }
        }
        itr_count++;
    } while (1);
    fp = fopen("overlap.txt","a+");
    fprintf(fp,"%d\t%d\t%d\n ",overlaps,overlaps0,itr_count);
    fclose(fp);
}

/*-----------------------------------------------------------------------------------
-------------------------------------------------------------------------------------*/
/* compute_total_grid_vel: diagnostic routine — evaluates the total (global
   mesh solution + local boundary-integral correction) fluid velocity on the
   Eulerian grid, restricted to the y midplane, and writes it to
   vel_grid_%03d.vtk (plain text columns: x, y, z, u, v, w).
   Unlike prvec this never feeds back into the dynamics; it is for output only. */
void compute_total_grid_vel(double xb[npart][beads][3], double fb[npart][beads][3], int psteps)
{
    int i,j,k;
    int ibead,itriangle,jtriangle;
    double xi[3];
    double r1;
    int i1,i2,i3;
    double pi;
    double u[3];
    int member;
    double weights[npart][beads];
    double uxf[nx][ny][nz], uyf[nx][ny][nz], uzf[nx][ny][nz],pf[nx][ny][nz];
    double ubc1[nx][ny][3], ubc2[nx][ny][3];
    double gaussx[nx][ny][nz],gaussy[nx][ny][nz],gaussz[nx][ny][nz],gaussz_p[nx][ny][nz];
    FILE *fp;
    char name[20];
    int ix,iy,iz;
    double dx,dy,dz;
    int ipart,jpart;
    double dudx[nx][ny][nz],dudy[nx][ny][nz],dudz[nx][ny][nz];
    double dvdx[nx][ny][nz],dvdy[nx][ny][nz],dvdz[nx][ny][nz];
    double dwdx[nx][ny][nz],dwdy[nx][ny][nz],dwdz[nx][ny][nz];

    pi = 4.0*atan(1.0);

    /* Mesh size (z grid has nz points spanning [0, Lz]) */
    dx = Lx/nx;
    dy = Ly/ny;
    dz = Lz/(nz-1.0);

#if (OMP == 1)
    omp_set_num_threads(NTHREADS);
#endif

    /*---------------- Global solution calculation -----------------*/

    /*----- Boundary Conditions ---- */
    /* Initialize */
    for(i=0;i<nx;i++)
    {
        for(j=0;j<ny;j++)
        {
            for(k=0;k<3;k++)
            {
                ubc1[i][j][k]=0.0;
                ubc2[i][j][k]=0.0;
            }
        }
    }

    /* set ubc = - u_l for poiseuille flow */
    /* Top plate */
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            xi[0] = ix*dx;
            xi[1] = iy*dy;
            xi[2] = Lz;
            for(itriangle=0;itriangle<triangle_count_t[ix][iy];itriangle++)
            {
                jpart = triangle_list_t[ix][iy][itriangle][0];
                jtriangle = triangle_list_t[ix][iy][itriangle][1];
                i1 = triangle[jtriangle][0];
                i2 = triangle[jtriangle][1];
                i3 = triangle[jtriangle][2];
                member=0;
                // performing non-singular integral as of now
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                for(i=0;i<3;i++)
                {
                    ubc2[ix][iy][i] -= u[i]/8/pi;
                }
            }
        }
    }

    /* bottom plate */
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            xi[0] = ix*dx;
            xi[1] = iy*dy;
            xi[2] = 0.0;
            for(itriangle=0;itriangle<triangle_count_b[ix][iy];itriangle++)
            {
                jpart = triangle_list_b[ix][iy][itriangle][0];
                jtriangle = triangle_list_b[ix][iy][itriangle][1];
                i1 = triangle[jtriangle][0];
                i2 = triangle[jtriangle][1];
                i3 = triangle[jtriangle][2];
                member=0;
                // performing non-singular integral as of now
                if (member == 1)
                {
                    integrate2(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                else
                {
                    integrate1(xb[jpart][i1],xb[jpart][i2],xb[jpart][i3],xi,fb[jpart][i1],fb[jpart][i2],fb[jpart][i3],u,alpha);
                }
                for(i=0;i<3;i++)
                {
                    ubc1[ix][iy][i] -= u[i]/8/pi;
                }
            }
        }
    }

    /* Find weights for the global solution */
    /* Initialize weights for the global solution */
    for(ipart=0;ipart<npart;ipart++)
    {
        for(i=0;i<beads;i++)
        {
            weights[ipart][i]=0.0;
        }
    }
    for(ipart=0;ipart<npart;ipart++)
    {
        for(itriangle=0;itriangle<ntriangles;itriangle++)
        {
            i1 = triangle[itriangle][0];
            i2 = triangle[itriangle][1];
            i3 = triangle[itriangle][2];
            /* Store weights for the global calculation */
            weights[ipart][i1] += area[ipart][itriangle]/3.0;
            weights[ipart][i2] += area[ipart][itriangle]/3.0;
            weights[ipart][i3] += area[ipart][itriangle]/3.0;
        }
    }

    /* Distribute density to mesh */
    /* Initialize mesh force density */
    for(ix=0;ix<nx;ix++)
    {
        for(iy=0;iy<ny;iy++)
        {
            for(iz=0;iz<nz;iz++)
            {
                gaussx[ix][iy][iz] = 0.0;
                gaussy[ix][iy][iz] = 0.0;
                gaussz[ix][iy][iz] = 0.0;
                gaussz_p[ix][iy][iz] = 0.0;
            }
        }
    }
    for(ipart=0;ipart<npart;ipart++)
    {
        distribute_density(xb[ipart],fb[ipart],gaussx,gaussy,gaussz,gaussz_p,weights[ipart],alpha,xz);
    }

    /* solve for the velocity and pressure at the mesh points */
    global_velocity_inhomogeneous(gaussx,gaussy,gaussz,gaussz_p,ubc1,ubc2,uxf,uyf,uzf,pf, dudx,dudy,dudz,dvdx,dvdy,dvdz,dwdx,dwdy,dwdz, xxi,cz, u1H,v1H,w1H,dwdz1H,dudz1H,dvdz1H,p1H, u2H,v2H,w2H,dwdz2H,dudz2H,dvdz2H,p2H);

    /* Add local contribution to the velocity at grid points: doing in an
       umoptimized fashion, as this is for test purposes only */
#if (OMP == 1)
#pragma omp parallel for private(ix,iy,iz,xi,itriangle,ipart,i1,i2,i3,member,u)
#endif
    for(ix=0;ix<nx;ix++)
    {
        for(iy=ny/2;iy<ny/2+1;iy++) // midplane only
        {
            for(iz=0;iz<nz;iz++)
            {
                xi[0] = ix*dx;
                xi[1] = iy*dy;
                xi[2] = iz*dz;
                for(ipart=0;ipart<npart;ipart++)
                {
                    for(itriangle=0;itriangle<ntriangles;itriangle++)
                    {
                        i1 = triangle[itriangle][0];
                        i2 = triangle[itriangle][1];
                        i3 = triangle[itriangle][2];
                        member=0;
                        // performing non-singular integral as of now
                        if (member == 1)
                        {
                            integrate2(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xi,fb[ipart][i1],fb[ipart][i2],fb[ipart][i3],u,alpha);
                        }
                        else
                        {
                            integrate1(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xi,fb[ipart][i1],fb[ipart][i2],fb[ipart][i3],u,alpha);
                        }
                        /* sum to grid velocity array
                           NOTE(review): the sign convention (-=) matches the
                           wall-BC cancellation above — confirm against the
                           kernel sign used in integrate1/integrate2 */
                        uxf[ix][iy][iz] -= u[0]/8/pi;
                        uyf[ix][iy][iz] -= u[1]/8/pi;
                        uzf[ix][iy][iz] -= u[2]/8/pi;
                    }
                }
            }
        }
    }

    if (CONDOR==0)
    {
        sprintf(name,"output/vel_grid_%03d.vtk",psteps);
    }
    else
    {
        sprintf(name,"vel_grid_%03d.vtk",psteps);
    }
    fp = fopen(name,"w+");
    for(ix=0;ix<nx;ix++)
    {
        for(iz=0;iz<nz;iz++)
        {
            fprintf(fp,"%E\t%E\t%E\t%E\t%E\t%E\n",ix*dx,ny/2*dy,iz*dz,uxf[ix][ny/2][iz],uyf[ix][ny/2][iz],uzf[ix][ny/2][iz]);
        }
    }
    fclose(fp);
}

/*------------------------------------------------------------
Computes initial volume
------------------------------------------------------------*/
/* compute_vol: accumulates the enclosed volume of particle ipart into *Vol0
   by summing surface-divergence contributions over all mesh triangles,
   using the per-triangle normal (the bead-normal variant is kept commented
   out as an alternative). *Vol0 is accumulated, not reset, by this routine. */
void compute_vol(double xb[beads][3], double xcm[3], double nrm[beads][3], double *Vol0, int ipart)
{
    int itriangle;
    int i1,i2,i3;

    /*------- Volume averaged velocity inside the capsule----------- */
    for(itriangle=0;itriangle<ntriangles;itriangle++)
    {
        i1 = triangle[itriangle][0];
        i2 = triangle[itriangle][1];
        i3 = triangle[itriangle][2];
        // normal of beads
        //integrate_vol(xb[i1],xb[i2],xb[i3],xcm,nrm[i1],nrm[i2],nrm[i3],Vol0);
        // normal of triangle
        integrate_vol(xb[i1],xb[i2],xb[i3],xcm,normal_t[ipart][itriangle],normal_t[ipart][itriangle], normal_t[ipart][itriangle],Vol0);
    }
}

/*----------------------------------------------*/
/* -------- randomly rotates the swimmers ------ */
/*----------------------------------------------*/
/* rand_rotate: applies the same Householder reflection (Q = I - 2*u*u^T,
   a rotation-by-reflection about the fixed axis u) to every particle,
   rotating each bead rigidly about its initial center of mass xcm0.
   NOTE(review): despite the name, the axis is currently hard-coded to
   (1,0,1)/sqrt(2) — the rand() calls are commented out, so the "random"
   rotation is deterministic. srand is still seeded with the wall clock. */
void rand_rotate(double xb[npart][beads][3], double xcm0[npart][3])
{
    int i,j,k,ipart;
    double Qrot[3][3],urot[3],norm_rot,xrot[3],xrotn[3];

    /* apply a random rotation */
    srand(time(NULL));
    for(ipart=0;ipart<npart;ipart++)
    {
        urot[0]= 1.0;//rand();
        urot[1]= 0.0;//rand();
        urot[2]= 1.0;//rand();
        norm_rot = sqrt(urot[0]*urot[0]+urot[1]*urot[1]+urot[2]*urot[2]);
        urot[0] = urot[0]/norm_rot;
        urot[1] = urot[1]/norm_rot;
        urot[2] = urot[2]/norm_rot;
        /* Householder matrix Q = I - 2*u*u^T */
        for(i=0;i<3;i++)
        {
            for(j=0;j<3;j++)
            {
                if (i==j)
                {
                    Qrot[i][j] = 1.0-2.0*urot[i]*urot[j];
                }
                else
                {
                    Qrot[i][j]=-2.0*urot[i]*urot[j];
                }
            }
        }
        /* rotate every bead about the particle center of mass */
        for(i=0;i<beads;i++)
        {
            for(j=0;j<3;j++)
            {
                xrot[j] = xb[ipart][i][j] - xcm0[ipart][j];
            }
            for(j=0;j<3;j++)
            {
                xrotn[j]=0.0;
                for(k=0;k<3;k++)
                {
                    xrotn[j] += Qrot[j][k]*xrot[k];
                }
            }
            for(j=0;j<3;j++)
            {
                xb[ipart][i][j] = xcm0[ipart][j] + xrotn[j];
            }
        }
    }
}

/*------------------------------------------------------- */
/* -------- rotates the swimmers by directed angle------ */
/*--------------------------------------------------------*/
/* dir_rotate: rigidly rotates every particle about its center of mass xcm0
   by theta_rot degrees about the y-axis (active Qrot below); the x-axis
   variant and the random-axis Householder variant are kept commented out. */
void dir_rotate(double xb[npart][beads][3], double xcm0[npart][3], double theta_rot)
{
    int i,j,k,ipart;
    double Qrot[3][3],theta,pi,xrot[3],xrotn[3];

    pi = 4.0*atan(1.0);
    /* apply a random rotation */
    // srand(time(NULL));
    theta = pi*(theta_rot/180);   /* degrees -> radians */
    //printf("theta = %E\t cos = %E\t THETA_rot = %E\n",theta,cos(theta),theta_rot);
    for(ipart=0;ipart<npart;ipart++)
    {
        //urot[0]= 1.0;//rand();
        //urot[1]= 0.0;//rand();
        //urot[2]= 1.0;//rand();
        //norm_rot = sqrt(urot[0]*urot[0]+urot[1]*urot[1]+urot[2]*urot[2]);
        //urot[0] = urot[0]/norm_rot;
        //urot[1] = urot[1]/norm_rot;
        //urot[2] = urot[2]/norm_rot;
        //for(i=0;i<3;i++)
        //{
        //for(j=0;j<3;j++)
        //{
        //if (i==j)
        //{
        //Qrot[i][j] = 1.0-2.0*urot[i]*urot[j];
        //}
        //else
        //{
        //Qrot[i][j]=-2.0*urot[i]*urot[j];
        //}
        //}
        //}
        /* First rotation by 90 about z-axis and theta about x-axis */
        //Qrot[0][0] = 1;
        //Qrot[0][1] = 0;
        //Qrot[0][2] = 0;
        //Qrot[1][0] = 0;
        //Qrot[1][1] = cos(theta);
        //Qrot[1][2] = -sin(theta);
        //Qrot[2][0] = 0;
        //Qrot[2][1] = sin(theta);
        //Qrot[2][2] = cos(theta);
        /* First rotation by 90 about z-axis and theta about y-axis(commented part) */
        Qrot[0][0] = cos(theta);
        Qrot[0][1] = 0;
        Qrot[0][2] = sin(theta);
        Qrot[1][0] = 0;
        Qrot[1][1] = 1;
        Qrot[1][2] = 0;
        Qrot[2][0] = -sin(theta);
        Qrot[2][1] = 0;
        Qrot[2][2] = cos(theta);
        /* rotate every bead about the particle center of mass */
        for(i=0;i<beads;i++)
        {
            for(j=0;j<3;j++)
            {
                xrot[j] = xb[ipart][i][j] - xcm0[ipart][j];
            }
            for(j=0;j<3;j++)
            {
                xrotn[j]=0.0;
                for(k=0;k<3;k++)
                {
                    xrotn[j] += Qrot[j][k]*xrot[k];
                }
            }
            for(j=0;j<3;j++)
            {
                xb[ipart][i][j] = xcm0[ipart][j] + xrotn[j];
            }
        }
    }
}

/*-----------------------------------------------------------------------------------
// rotates so as to make ibead=0 on the -ve z-axis
-------------------------------------------------------------------------------------*/
/* rotate_z_align: builds a Householder reflection that maps the (CM-relative)
   position of bead 0 onto the +z unit vector, then applies it to all beads.
   NOTE(review): a Householder reflection maps zp exactly onto (0,0,1) only
   when |zp| = 1, i.e. the mesh is assumed to have unit radius here — and
   the header comment says "-ve z-axis" while the target is +z; confirm. */
void rotate_z_align(double xb[beads][3],double xcm[3])
{
    int i,j,k;
    int ibead;
    double norm;
    double u[3],Q[3][3],zp[3];
    double xb_rot[beads][3];

    /* find the displacement wrt to the center of mass */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            xb_rot[ibead][i] = xb[ibead][i] - xcm[i];
        }
    }
    /* Store ibead=0 as the z-axis in the old frame of reference */
    zp[0] = xb_rot[0][0];
    zp[1] = xb_rot[0][1];
    zp[2] = xb_rot[0][2];
    /* find the rotation matrix: Use householder's algorithm */
    if (fabs(zp[2]-1) > 1E-10)
    {
        u[0] = 0.0 - zp[0];
        u[1] = 0.0 - zp[1];
        u[2] = 1.0 - zp[2];
        norm = sqrt(u[0]*u[0]+u[1]*u[1]+u[2]*u[2]);
        /* normalize */
        u[0] = u[0]/norm;
        u[1] = u[1]/norm;
        u[2] = u[2]/norm;
        /* Find the Q matrix */
        for(i=0;i<3;i++)
        {
            for(j=0;j<3;j++)
            {
                if (i==j)
                {
                    Q[i][j] = 1.0 - 2.0*u[i]*u[j];
                }
                else
                {
                    Q[i][j] = -2.0*u[i]*u[j];
                }
            }
        }
    }
    else // no rotation necessary Q=I
    {
        for(i=0;i<3;i++)
        {
            for(j=0;j<3;j++)
            {
                if (i==j)
                {
                    Q[i][j] = 1.0;
                }
                else
                {
                    Q[i][j] = 0.0;
                }
            }
        }
    }
    /* transform the coordinates */
    for(ibead=0;ibead<beads;ibead++)
    {
        for(i=0;i<3;i++)
        {
            xb[ibead][i] = xcm[i];
            for(j=0;j<3;j++)
            {
                xb[ibead][i] += Q[i][j]*xb_rot[ibead][j];
            }
        }
    }
}
/*---------------------------------------------------------------*/ /*---- Computes the particle contribution to the stress tensor ---*/ /*---------------------------------------------------------------*/ void compute_bulk_stress_tensor(double xb[npart][beads][3],double xcm[npart][3], double fm[npart][beads][3],double ub[npart][beads][3],double nrm[npart][beads][3], double Sigma[npart][3][3],double Sigma_g[3][3],double Sigma_g1[3][3], double Sigma_g2[3][3],double Vcell, double viscr[npart], double Stime) { int i,j,k,ibead,ipart; int itriangle; int i1,i2,i3; double S12,N1,N2; double S12a,N1a,N2a; double S12b,N1b,N2b; int npart1,npart2; FILE *fp; char name[20]; double ndensity; npart2 = NRATIO*npart; npart1 = npart - npart2; /* Initialize */ for(i=0;i<3;i++) { for(j=0;j<3;j++) { Sigma_g[i][j]=0.0; Sigma_g1[i][j]=0.0; Sigma_g2[i][j]=0.0; } } /*------- Particle's contribution to bulk stress tensor----------- */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<3;i++) { for(j=0;j<3;j++) { Sigma[ipart][i][j]=0.0; } } for(itriangle=0;itriangle<ntriangles;itriangle++) { i1 = triangle[itriangle][0]; i2 = triangle[itriangle][1]; i3 = triangle[itriangle][2]; // normal of beads // integrate_stress(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xcm[ipart],ub[ipart][i1],ub[ipart][i2], // ub[ipart][i3],nrm[ipart][i1],nrm[ipart][i2],nrm[ipart][i3],fm[ipart][i1], // fm[ipart][i2],fm[ipart][i3],Sigma[ipart],viscr[ipart]); // normal of triangle integrate_stress(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xcm[ipart],ub[ipart][i1],ub[ipart][i2], ub[ipart][i3],normal_t[ipart][itriangle],normal_t[ipart][itriangle],normal_t[ipart][itriangle], fm[ipart][i1], fm[ipart][i2],fm[ipart][i3],Sigma[ipart],viscr[ipart]); } } /* find the particle's contribution to bulk stress */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<3;i++) { for(j=0;j<3;j++) { Sigma_g[i][j] += Sigma[ipart][i][j]; } } } /* find the particle's contribution to bulk stress: Ist type */ for(ipart=0;ipart<npart1;ipart++) { for(i=0;i<3;i++) { 
for(j=0;j<3;j++) { Sigma_g1[i][j] += Sigma[ipart][i][j]; } } } /* find the particle's contribution to bulk stress: 2nd type */ for(ipart=npart1;ipart<npart;ipart++) { for(i=0;i<3;i++) { for(j=0;j<3;j++) { Sigma_g2[i][j] += Sigma[ipart][i][j]; } } } /* Volume averaged contribution */ for(i=0;i<3;i++) { for(j=0;j<3;j++) { Sigma_g[i][j]=Sigma_g[i][j]/Vcell; Sigma_g1[i][j]=Sigma_g1[i][j]/Vcell*npart/npart1; if (npart2 > 0) { Sigma_g2[i][j]=Sigma_g2[i][j]/Vcell*npart/npart2; } } } S12 = 0.5*(Sigma_g[0][2]+Sigma_g[2][0]); // xy N1 = Sigma_g[0][0]-Sigma_g[2][2]; // xx - yy N2 = Sigma_g[2][2]-Sigma_g[1][1]; // yy - zz S12a = 0.5*(Sigma_g1[0][2]+Sigma_g1[2][0]); N1a = Sigma_g1[0][0]-Sigma_g1[2][2]; N2a = Sigma_g1[2][2]-Sigma_g1[1][1]; if (npart2 > 0) { S12b = 0.5*(Sigma_g2[0][2]+Sigma_g2[2][0]); N1b = Sigma_g2[0][0]-Sigma_g2[2][2]; N2b = Sigma_g2[2][2]-Sigma_g2[1][1]; } /* print */ if (CONDOR == 1) { fp = fopen("Stress.txt","a+"); } else { fp = fopen("output/Stress.txt","a+"); } if(npart2 > 0) { fprintf(fp,"%E %E %E\t%E %E %E\t%E %E %E\n",S12,N1,N2,S12a,N1a,N2a,S12b,N1b,N2b); } else { fprintf(fp,"%E %E %E\t%E %E %E\n",S12,N1,N2,S12a,N1a,N2a); } fclose(fp); if (PRINT == 1) { printf("S12 = %E\t N1 = %E\t N2 = %E\n",S12,N1,N2); printf("S12a = %E\t N1a = %E\t N2a = %E\n",S12a,N1a,N2a); if (npart2>0) { printf("S12b = %E\t N1b = %E\t N2b= %E\n",S12b,N1b,N2b); } } /* print per particle stress tensor */ ndensity = npart/Vcell; for(ipart=0;ipart<npart;ipart++) { if (CONDOR==1) { sprintf(name,"stress_%03d.txt",ipart); } else { sprintf(name,"output/stress_%03d.txt",ipart); } S12 = 0.5*(Sigma[ipart][0][2]+Sigma[ipart][2][0]); // xy N1 = Sigma[ipart][0][0]-Sigma[ipart][2][2]; // xx - yy N2 = Sigma[ipart][2][2]-Sigma[ipart][1][1]; // yy - zz fp = fopen(name,"a+"); fprintf(fp,"%E\t%E\t%E\n",S12*ndensity,N1*ndensity,N2*ndensity); fclose(fp); } } /*-------------------------------------------------------------------------------------*/ /* ----- Computes volume averaged velocity and rate of 
rotation of particles ---------*/ /*-------------------------------------------------------------------------------------*/ void volume_avg_u_omega(double ub[npart][beads][3],double nrm[npart][beads][3], double xcm[npart][3],double xb[npart][beads][3],double Stime) { int i,j,k,ibead,ipart; int itriangle; int i1,i2,i3; double pi; double uvol[npart][3],uvolw[npart]; double xcm_v[npart][3]; double Omega[3][3],omega[npart][3]; int epsilon[3][3][3]; double x1[3],x2[3],x3[3]; FILE *fp; char name[20]; pi = 4.0*atan(1.0); /* set permutation operator */ for(i=0;i<3;i++) { for(j=0;j<3;j++) { for(k=0;k<3;k++) { if(i==j || i==k || j==k) { epsilon[i][j][k]=0; } else { if(i==0) { if(j==1) { epsilon[i][j][k]=1; } else { epsilon[i][j][k]=-1; } } if(i==1) { if(j==2) { epsilon[i][j][k]=1; } else { epsilon[i][j][k]=-1; } } if(i==2) { if(j==0) { epsilon[i][j][k]=1; } else { epsilon[i][j][k]=-1; } } } } } } /*------- Volume averaged velocity and rotation inside the capsule----------- */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<3;i++) { uvol[ipart][i]=0.0; for(j=0;j<3;j++) { Omega[i][j]=0.0; } } uvolw[ipart]=0.0; for(itriangle=0;itriangle<ntriangles;itriangle++) { i1 = triangle[itriangle][0]; i2 = triangle[itriangle][1]; i3 = triangle[itriangle][2]; // normal of beads //integrate_trans_vol(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xcm[ipart],ub[ipart][i1],ub[ipart][i2],ub[ipart][i3],nrm[ipart][i1],nrm[ipart][i2],nrm[ipart][i3],uvol[ipart],Omega,&uvolw[ipart]); // normal of triangle integrate_trans_vol(xb[ipart][i1],xb[ipart][i2],xb[ipart][i3],xcm[ipart],ub[ipart][i1],ub[ipart][i2],ub[ipart][i3],normal_t[ipart][itriangle],normal_t[ipart][itriangle],normal_t[ipart][itriangle],uvol[ipart],Omega,&uvolw[ipart]); } /* volume averaged velocity */ for(i=0;i<3;i++) { uvol[ipart][i] = uvol[ipart][i]/uvolw[ipart]; } /* volume averaged rotation rate */ for(i=0;i<3;i++) { omega[ipart][i]=0.0; for(j=0;j<3;j++) { for(k=0;k<3;k++) { omega[ipart][i] += 0.5*epsilon[i][j][k]*Omega[j][k]; } } } } 
/*------- Volume averaged center of mass of the capsule----------- */ for(ipart=0;ipart<npart;ipart++) { for(i=0;i<3;i++) { xcm_v[ipart][i]=0.0; } for(itriangle=0;itriangle<ntriangles;itriangle++) { i1 = triangle[itriangle][0]; i2 = triangle[itriangle][1]; i3 = triangle[itriangle][2]; for(i=0;i<3;i++) { x1[i] = xb[ipart][i1][i] - xcm[ipart][i]; x2[i] = xb[ipart][i2][i] - xcm[ipart][i]; x3[i] = xb[ipart][i3][i] - xcm[ipart][i]; } // normal at beads //integrate_rsq_vol(x1,x2,x3,nrm[ipart][i1],nrm[ipart][i2],nrm[ipart][i3],xcm_v[ipart]); // normal of triangle integrate_rsq_vol(x1,x2,x3,normal_t[ipart][itriangle], normal_t[ipart][itriangle],normal_t[ipart][itriangle],xcm_v[ipart]); } for(i=0;i<3;i++) { xcm_v[ipart][i] = xcm_v[ipart][i]/uvolw[ipart] + xcm[ipart][i]; } } if(PRINT==1) { if (npart==1) { printf("Volume average U: %E\t%E\t%E\n",uvol[0][0],uvol[0][1],uvol[0][2]); printf("Vol avg CM: %E\t%E\t%E\n",xcm_v[0][0],xcm_v[0][1],xcm_v[0][2]); printf("Volume = %E\n",uvolw[0]/4/pi*3); } } for(ipart=0;ipart<npart;ipart++) { if (CONDOR==1) { sprintf(name,"pos_vel_%03d.txt",ipart); } else { sprintf(name,"output/pos_vel_%03d.txt",ipart); } fp = fopen(name,"a+"); fprintf(fp,"%E\t",Stime); #if (DETAILED==1) fprintf(fp,"%E\t%E\t%E\t",xcm_v[ipart][0],xcm_v[ipart][1],xcm_v[ipart][2]); fprintf(fp,"%E\t%E\t%E\t",uvol[ipart][0],uvol[ipart][1],uvol[ipart][2]); fprintf(fp,"%E\t%E\t%E\n",omega[ipart][0],omega[ipart][1],omega[ipart][2]); #else //fprintf(fp,"%E\t%E\t%E\n",xcm_v[ipart][0],xcm_v[ipart][1],xcm_v[ipart][2]); fprintf(fp,"%E\t%E\t%E\n",uvol[ipart][0],uvol[ipart][1],uvol[ipart][2]); #endif fclose(fp); } } /*----------------------------------------------------------------------------------- Find Taylor deformation parameter -------------------------------------------------------------------------------------*/ void deformation_parameter(double xb[beads][3],double xcm[3],double Le[3], double thetae[2],double phie[2]) { int i,j; double max=-100,min=100; double dist; double x,y,z; 
for(i=0;i<beads;i++) { x = xb[i][0]-xcm[0]; y = xb[i][1]-xcm[1]; z = xb[i][2]-xcm[2]; dist = sqrt(x*x+y*y+z*z); if (dist < min) { min = dist; Le[0] = dist; thetae[0] = acos(y/dist); phie[0] = atan(z/x); } if (dist > max) { max = dist; Le[1] = dist; thetae[1] = acos(y/dist); phie[1] = atan(z/x); } } } /*----------------------------------------------------------------------------------- Find Taylor deformation parameter using mass moment of inertia tensor -------------------------------------------------------------------------------------*/ void mmoi_deformation_parameter(double xb[beads][3], double xcm0[3], double DD[2], double Stime, int ipart, int print) { int i, j, k, ii, jj, kk; double mmoi[3][3],delta[3][3], xcm[3]; double dxij[3], dxjk[3], dxik[3]; double xcm_to_cm[3], normal[3], v[3]; double rij,rjk,rik,s,area_l,norm,theta_cm_to_cm, r2; int N, LDA, LWORK, INFO; double eigenv[3]; double* work; double wkopt; double lmax, lmin, lmid, pi; FILE *fp; char name[20]; pi = 4*atan2(1,1); for(i=0;i<3;i++) { for(j=0;j<3;j++) { mmoi[i][j] = 0.0; delta[i][j] = 0.0; } } for(i=0;i<3;i++) { delta[i][i] = 1.0; } for(i=0;i<ntriangles;i++) { for(j=0;j<3;j++) { dxij[j] = xb[triangle[i][1]][j]-xb[triangle[i][0]][j]; dxjk[j] = xb[triangle[i][2]][j]-xb[triangle[i][1]][j]; dxik[j] = xb[triangle[i][0]][j]-xb[triangle[i][2]][j]; } rij = sqrt(dot_product(3,dxij,dxij)); rjk = sqrt(dot_product(3,dxjk,dxjk)); rik = sqrt(dot_product(3,dxik,dxik)); s = 0.5*(rij+rjk+rik); area_l = sqrt(s*(s-rij)*(s-rjk)*(s-rik)); /* center of mass of the triangle */ for(j=0;j<3;j++) { xcm[j] = (xb[triangle[i][0]][j] + xb[triangle[i][1]][j] + xb[triangle[i][2]][j])/3.0; } cross_product(dxij, dxjk, normal); norm = sqrt(dot_product(3,normal,normal)); for(j=0;j<3;j++) { normal[j] = normal[j]/norm; } for(j=0;j<3;j++) { xcm_to_cm[j] = xcm0[j] - xcm[j]; } norm = sqrt(dot_product(3,xcm_to_cm,xcm_to_cm)); for(j=0;j<3;j++) { xcm_to_cm[j] = xcm_to_cm[j]/norm; } theta_cm_to_cm = acos(dot_product(3,normal,xcm_to_cm)); 
if(theta_cm_to_cm < pi/2.0) { for(j=0;j<3;j++) { normal[j] = -normal[j]; } } for(j=0;j<3;j++) { v[j] = xcm[j] - xcm0[j]; } r2 = dot_product(3,v,v); for(ii=0;ii<3;ii++) { for(jj=0;jj<3;jj++) { for(kk=0;kk<3;kk++) { mmoi[ii][jj] += (r2*v[kk]*delta[ii][jj] - v[ii]*v[jj]*v[kk])*normal[kk]*area_l; } } } } mmoi[1][0] = 0.0; mmoi[2][0] = 0.0; mmoi[2][1] = 0.0; N=3;LDA=3;LWORK=-1; dsyev_( "V", "L", &N, mmoi, &LDA, eigenv, &wkopt, &LWORK, &INFO ); LWORK = (int)wkopt; printf("%d",LWORK); work = (double*)malloc( LWORK*sizeof(double) ); /* Solve eigenproblem */ dsyev_( "V", "L", &N, mmoi, &LDA, eigenv, work, &LWORK, &INFO ); /* Check for convergence */ if( INFO > 0 ) { printf( "The algorithm failed to compute eigenvalues.\n" ); exit( 1 ); } //kaiser(mmoi, 3, 3, eigenv, trace, sume, ier); lmax = sqrt(eigenv[2]+eigenv[1]-eigenv[0]); lmin = sqrt(eigenv[1]+eigenv[0]-eigenv[2]); lmid = sqrt(eigenv[0]+eigenv[2]-eigenv[1]); DD[0] = (lmax-lmin)/(lmax+lmin); DD[1] = atan(mmoi[0][2]/mmoi[0][0]); /* Print Eigenvalues and Eigenvectors */ if (PRINT == 1 || print == 1) { if (CONDOR==1) { sprintf(name,"Eigenval_vector.txt"); } else { sprintf(name,"output/Eigenval_vector.txt"); } fp=fopen(name,"a+"); fprintf(fp,"%lf\t%d\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",Stime,ipart,eigenv[0],eigenv[1],eigenv[2], mmoi[0][0],mmoi[0][1],mmoi[0][2],mmoi[1][0],mmoi[1][1],mmoi[1][2],mmoi[2][0],mmoi[2][1],mmoi[2][2]); fclose(fp); } } /*----------------------------------------------------------------------------------- find area weighted center of mass of the body -------------------------------------------------------------------------------------*/ void compute_center_mass_area(double xb[beads][3], double xcm0[3]) { int i,j; double xcm[3],xcm_to_cm[3]; double dxij[3],dxjk[3],dxik[3]; double rij,rjk,rik,s,theta_cm_to_cm; double area_sum=0.0; double area_l; for(i=0;i<3;i++) { xcm0[i]=0.0; } for(i=0;i<ntriangles;i++) { for(j=0;j<3;j++) { dxij[j] = 
xb[triangle[i][1]][j]-xb[triangle[i][0]][j]; dxjk[j] = xb[triangle[i][2]][j]-xb[triangle[i][1]][j]; dxik[j] = xb[triangle[i][0]][j]-xb[triangle[i][2]][j]; } rij = sqrt(dot_product(3,dxij,dxij)); rjk = sqrt(dot_product(3,dxjk,dxjk)); rik = sqrt(dot_product(3,dxik,dxik)); s = 0.5*(rij+rjk+rik); area_l = sqrt(s*(s-rij)*(s-rjk)*(s-rik)); area_sum += area_l; /* center of mass of the triangle */ for(j=0;j<3;j++) { xcm[j] = (xb[triangle[i][0]][j] + xb[triangle[i][1]][j] + xb[triangle[i][2]][j])/3.0; } /* sum to center of mass of the body */ for(j=0;j<3;j++) { xcm0[j] += xcm[j]*area_l; } } /* Find CM */ for(j=0;j<3;j++) { xcm0[j] = xcm0[j]/area_sum; } if(PRINT==1) { if (npart == 1) { printf("xcm=%lf\tycm=%lf\tzcm=%lf\trad=%lf\n",xcm0[0],xcm0[1],xcm0[2],sqrt(area_sum/4/3.14)); } } } /*----------------------------------------------------------------------------------- computes the traction at nodes due to the imposed flow:finf -------------------------------------------------------------------------------------*/ void compute_finf(double finf[beads][3],double xb[beads][3],double nrm[beads][3]) { int ibead; double pi; double U0; double gdot=GDOT; /* Compute the stress tensor and then traction at each node */ if (SHEAR==0) { /* set U0=centerline velocity */ U0 = gdot*Lz/4.0; for(ibead=0;ibead<beads;ibead++) { finf[ibead][0] = 8*U0*xb[ibead][0]/Lz/Lz*nrm[ibead][0] + 4*U0/Lz*(1-2*xb[ibead][2]/Lz)*nrm[ibead][2]; finf[ibead][1] = 8*U0*xb[ibead][0]/Lz/Lz*nrm[ibead][1]; finf[ibead][2] = 8*U0*xb[ibead][0]/Lz/Lz*nrm[ibead][2] + 4*U0/Lz*(1-2*xb[ibead][2]/Lz)*nrm[ibead][0]; } } else { for(ibead=0;ibead<beads;ibead++) { finf[ibead][0] = gdot*nrm[ibead][2]; finf[ibead][1] = 0.0; finf[ibead][2] = gdot*nrm[ibead][0]; } } } /*--------------------------------------------------*/ /* sets the viscosity ratio and membrane stiffness */ /*-------------------------------------------------*/ void set_viscr(double viscr[npart], double krbc_r[npart]) { int ipart; double randr; int npart1,npart2; 
npart2 = NRATIO*npart; npart1 = npart - npart2; /* set viscosity ratio */ if (LAMBDA == 1) { for(ipart=0;ipart<npart;ipart++) { viscr[ipart]=1.0; } } else { for(ipart=0;ipart<npart1;ipart++) { viscr[ipart]=LAMBDA1; } for(ipart=npart1;ipart<npart;ipart++) { viscr[ipart]=LAMBDA2; } } /* set membrane shear modulus */ for(ipart=0;ipart<npart1;ipart++) { krbc_r[ipart] = 1.0; } for(ipart=npart1;ipart<npart;ipart++) { krbc_r[ipart] = KRBC_R; } }
/* ===== file: par_relax_more.c ===== */
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * a few more relaxation schemes: Chebychev, FCF-Jacobi, CG -
 * these do not go through the CF interface (hypre_BoomerAMGRelaxIF)
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "float.h"

/******************************************************************************
 *
 * use Gershgorin discs to estimate smallest and largest eigenvalues
 * A is assumed to be symmetric
 * For SPD matrix, it returns [0, max_eig = max (aii + ri)],
 * ri is radius of disc centered at a_ii
 * For SND matrix, it returns [min_eig = min (aii - ri), 0]
 *
 * scale > 0: compute eigen estimate of D^{-1/2}*A*D^{-1/2}, where
 *            D = diag(A) for SPD matrix, D = -diag(A) for SND
 *
 * scale = 1: The algorithm is performed on D^{-1}*A, since it
 *            has the same eigenvalues as D^{-1/2}*A*D^{-1/2}
 * scale = 2: The algorithm is performed on D^{-1/2}*A*D^{-1/2} (TODO)
 *
 *****************************************************************************/
HYPRE_Int
hypre_ParCSRMaxEigEstimateHost( hypre_ParCSRMatrix *A,     /* matrix to relax with */
                                HYPRE_Int           scale, /* scale by diagonal? */
                                HYPRE_Real         *max_eig,
                                HYPRE_Real         *min_eig )
{
   HYPRE_Int   A_num_rows   = hypre_ParCSRMatrixNumRows(A);
   HYPRE_Int  *A_diag_i     = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int  *A_diag_j     = hypre_CSRMatrixJ(hypre_ParCSRMatrixDiag(A));
   HYPRE_Int  *A_offd_i     = hypre_CSRMatrixI(hypre_ParCSRMatrixOffd(A));
   HYPRE_Real *A_diag_data  = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A));
   HYPRE_Real *A_offd_data  = hypre_CSRMatrixData(hypre_ParCSRMatrixOffd(A));
   HYPRE_Real *diag         = NULL;
   HYPRE_Int   i, j;
   HYPRE_Real  e_max, e_min;
   HYPRE_Real  send_buf[2], recv_buf[2];
   HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);

   /* NOTE(review): diag is allocated for the scale == 2 (TODO) path but is
      never written or read below; it is only freed at the end */
   if (scale > 1)
   {
      diag = hypre_TAlloc(HYPRE_Real, A_num_rows, memory_location);
   }

   /* local Gershgorin disc sweep: for each row, a_ii is the diagonal entry
      and r_i the sum of |off-diagonal| entries (diag + offd parts) */
   for (i = 0; i < A_num_rows; i++)
   {
      HYPRE_Real a_ii = 0.0, r_i = 0.0, lower, upper;

      for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
      {
         if (A_diag_j[j] == i)
         {
            a_ii = A_diag_data[j];
         }
         else
         {
            r_i += hypre_abs(A_diag_data[j]);
         }
      }

      for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
      {
         r_i += hypre_abs(A_offd_data[j]);
      }

      lower = a_ii - r_i;
      upper = a_ii + r_i;

      /* scale == 1: disc of D^{-1}*A row, same eigenvalues as D^{-1/2}AD^{-1/2} */
      if (scale == 1)
      {
         lower /= hypre_abs(a_ii);
         upper /= hypre_abs(a_ii);
      }

      /* first row initializes the running min/max */
      if (i)
      {
         e_max = hypre_max(e_max, upper);
         e_min = hypre_min(e_min, lower);
      }
      else
      {
         e_max = upper;
         e_min = lower;
      }
   }

   /* get e_min e_max across procs: one MAX-allreduce handles both by
      negating e_min */
   send_buf[0] = -e_min;
   send_buf[1] = e_max;
   hypre_MPI_Allreduce(send_buf, recv_buf, 2, HYPRE_MPI_REAL, hypre_MPI_MAX,
                       hypre_ParCSRMatrixComm(A));
   e_min = -recv_buf[0];
   e_max = recv_buf[1];

   /* return: clamp the interval to touch zero on the side of smaller
      magnitude (SPD -> [0, e_max], SND -> [e_min, 0]) */
   if ( hypre_abs(e_min) > hypre_abs(e_max) )
   {
      *min_eig = e_min;
      *max_eig = hypre_min(0.0, e_max);
   }
   else
   {
      *min_eig = hypre_max(e_min, 0.0);
      *max_eig = e_max;
   }

   hypre_TFree(diag, memory_location);

   return hypre_error_flag;
}

/**
 * @brief Estimates the max eigenvalue using infinity norm. Will determine
 * whether or not to use host or device internally
 *
 * @param[in]  A        Matrix to relax with
 * @param[in]  scale    Scale by diagonal?
 * @param[out] max_eig  Estimated maximum eigenvalue
 * @param[out] min_eig  Estimated minimum eigenvalue
 */
HYPRE_Int
hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                           HYPRE_Int           scale, /* scale by diagonal?*/
                           HYPRE_Real         *max_eig,
                           HYPRE_Real         *min_eig)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ParCSRMaxEigEstimate");
#endif

   /* dispatch on where the matrix memory lives */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_ParCSRMaxEigEstimateHost(A, scale, max_eig, min_eig);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_ParCSRMaxEigEstimateDevice(A, scale, max_eig, min_eig);
   }
#endif

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif

   return ierr;
}

/**
 * @brief Uses CG to get the eigenvalue estimate. Will determine whether to use
 * host or device internally
 *
 * @param[in] A Matrix to relax with
 * @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2}
 * @param[in] max_iter Maximum number of iterations for CG
 * @param[out] max_eig Estimated max eigenvalue
 * @param[out] min_eig Estimated min eigenvalue
 */
HYPRE_Int
hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
                             HYPRE_Int           scale, /* scale by diagonal?*/
                             HYPRE_Int           max_iter,
                             HYPRE_Real         *max_eig,
                             HYPRE_Real         *min_eig)
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPushRange("ParCSRMaxEigEstimateCG");
#endif

   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1(hypre_ParCSRMatrixMemoryLocation(A));

   HYPRE_Int ierr = 0;

   if (exec == HYPRE_EXEC_HOST)
   {
      ierr = hypre_ParCSRMaxEigEstimateCGHost(A, scale, max_iter, max_eig, min_eig);
   }
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   else
   {
      ierr = hypre_ParCSRMaxEigEstimateCGDevice(A, scale, max_iter, max_eig, min_eig);
   }
#endif
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   hypre_GpuProfilingPopRange();
#endif
   return ierr;
}

/**
 * @brief Uses CG to get the eigenvalue estimate on the host.
 *
 * Runs (at most) max_iter CG steps, accumulating the Lanczos tridiagonal
 * matrix from the CG coefficients, then takes its extreme eigenvalues
 * (via hypre_LINPACKcgtql1) as the estimates.
 *
 * @param[in] A Matrix to relax with
 * @param[in] scale Gets the eigenvalue est of D^{-1/2} A D^{-1/2}
 * @param[in] max_iter Maximum number of iterations for CG
 * @param[out] max_eig Estimated max eigenvalue
 * @param[out] min_eig Estimated min eigenvalue
 */
HYPRE_Int
hypre_ParCSRMaxEigEstimateCGHost( hypre_ParCSRMatrix *A,     /* matrix to relax with */
                                  HYPRE_Int           scale, /* scale by diagonal?*/
                                  HYPRE_Int           max_iter,
                                  HYPRE_Real         *max_eig,
                                  HYPRE_Real         *min_eig )
{
   HYPRE_Int        i, j, err;
   hypre_ParVector *p;
   hypre_ParVector *s;
   hypre_ParVector *r;
   hypre_ParVector *ds;
   hypre_ParVector *u;

   HYPRE_Real *tridiag = NULL;
   HYPRE_Real *trioffd = NULL;

   HYPRE_Real lambda_max;
   HYPRE_Real beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
   HYPRE_Real lambda_min;
   HYPRE_Real *s_data, *p_data, *ds_data, *u_data;
   HYPRE_Int   local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   /* check the size of A - don't iterate more than the size */
   HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);

   if (size < (HYPRE_BigInt) max_iter)
   {
      max_iter = (HYPRE_Int) size;
   }

   /* create some temp vectors: p, s, r , ds, u*/
   r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(r);

   p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(p);

   s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(s);

   ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                              hypre_ParCSRMatrixGlobalNumRows(A),
                              hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(ds);

   u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                             hypre_ParCSRMatrixGlobalNumRows(A),
                             hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(u);

   /* point to local data */
   s_data  = hypre_VectorData(hypre_ParVectorLocalVector(s));
   p_data  = hypre_VectorData(hypre_ParVectorLocalVector(p));
   ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
   u_data  = hypre_VectorData(hypre_ParVectorLocalVector(u));

   /* make room for tri-diag matrix */
   tridiag = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST);
   trioffd = hypre_CTAlloc(HYPRE_Real, max_iter + 1, HYPRE_MEMORY_HOST);

   for (i = 0; i < max_iter + 1; i++)
   {
      tridiag[i] = 0;
      trioffd[i] = 0;
   }

   /* set residual to random */
   hypre_ParVectorSetRandomValues(r, 1);

   if (scale)
   {
      /* ds <- 1/sqrt(diag(A)), used to apply D^{-1/2} on both sides */
      hypre_CSRMatrixExtractDiagonal(hypre_ParCSRMatrixDiag(A), ds_data, 4);
   }
   else
   {
      /* set ds to 1 */
      hypre_ParVectorSetConstantValues(ds, 1.0);
   }

   /* gamma = <r,Cr> */
   gamma = hypre_ParVectorInnerProd(r, p);

   /* for the initial filling of the tridiag matrix */
   beta = 1.0;

   i = 0;
   while (i < max_iter)
   {
      /* s = C*r */
      /* TO DO:  C = diag scale */
      hypre_ParVectorCopy(r, s);

      /*gamma = <r,Cr> */
      gamma_old = gamma;
      gamma = hypre_ParVectorInnerProd(r, s);

      /* residual has (numerically) vanished: stop iterating */
      if (gamma < HYPRE_REAL_EPSILON)
      {
         break;
      }

      if (i == 0)
      {
         beta = 1.0;
         /* p_0 = C*r */
         hypre_ParVectorCopy(s, p);
      }
      else
      {
         /* beta = gamma / gamma_old */
         beta = gamma / gamma_old;

         /* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
         #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < local_size; j++)
         {
            p_data[j] = s_data[j] + beta * p_data[j];
         }
      }

      if (scale)
      {
         /* s = D^{-1/2}A*D^{-1/2}*p */
         for (j = 0; j < local_size; j++)
         {
            u_data[j] = ds_data[j] * p_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
         for (j = 0; j < local_size; j++)
         {
            s_data[j] = ds_data[j] * s_data[j];
         }
      }
      else
      {
         /* s = A*p */
         hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
      }

      /* <s,p> */
      sdotp = hypre_ParVectorInnerProd(s, p);

      /* alpha = gamma / <s,p> */
      alpha = gamma / sdotp;

      /* get tridiagonal matrix: standard CG -> Lanczos coefficient mapping */
      alphainv = 1.0 / alpha;

      tridiag[i + 1] = alphainv;
      tridiag[i] *= beta;
      tridiag[i] += alphainv;

      trioffd[i + 1] = alphainv;
      trioffd[i] *= sqrt(beta);

      /* x = x + alpha*p */
      /* don't need */

      /* r = r - alpha*s */
      hypre_ParVectorAxpy(-alpha, s, r);

      i++;
   }

   /* eispack routine - eigenvalues return in tridiag and ordered*/
   hypre_LINPACKcgtql1(&i, tridiag, trioffd, &err);

   lambda_max = tridiag[i - 1];
   lambda_min = tridiag[0];
   /* hypre_printf("linpack max eig est = %g\n", lambda_max);*/
   /* hypre_printf("linpack min eig est = %g\n", lambda_min);*/

   hypre_TFree(tridiag, HYPRE_MEMORY_HOST);
   hypre_TFree(trioffd, HYPRE_MEMORY_HOST);

   hypre_ParVectorDestroy(r);
   hypre_ParVectorDestroy(s);
   hypre_ParVectorDestroy(p);
   hypre_ParVectorDestroy(ds);
   hypre_ParVectorDestroy(u);

   /* return */
   *max_eig = lambda_max;
   *min_eig = lambda_min;

   return hypre_error_flag;
}

/******************************************************************************
Chebyshev relaxation

Can specify order 1-4 (this is the order of the resid polynomial)- here we
explicitly code the coefficients (instead of iteratively determining)

variant 0: standard chebyshev
this is rlx 11 if scale = 0, and 16 if scale == 1

variant 1: modified cheby: T(t)* f(t) where f(t) = (1-b/t)
this is rlx 15 if scale = 0, and 17 if scale == 1

ratio indicates the percentage of the whole spectrum to use (so .5
means half, and .1 means 10percent)
*******************************************************************************/
HYPRE_Int
hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
                        hypre_ParVector    *f, /* right-hand side */
                        HYPRE_Real          max_eig,
                        HYPRE_Real          min_eig,
                        HYPRE_Real          fraction,
                        HYPRE_Int           order, /* polynomial order */
                        HYPRE_Int           scale, /* scale by diagonal?*/
                        HYPRE_Int           variant,
                        hypre_ParVector    *u, /* initial/updated approximation */
                        hypre_ParVector    *v, /* temporary vector */
                        hypre_ParVector    *r /*another temp vector */)
{
   HYPRE_Real      *coefs = NULL;
   HYPRE_Real      *ds_data = NULL;
   hypre_ParVector *tmp_vec = NULL;
   hypre_ParVector *orig_u_vec = NULL;

   /* setup computes the polynomial coefficients (and D^{-1/2} if scale) */
   hypre_ParCSRRelax_Cheby_Setup(A, max_eig, min_eig, fraction, order, scale,
                                 variant, &coefs, &ds_data);

   orig_u_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize_v2(orig_u_vec, hypre_ParCSRMatrixMemoryLocation(A));

   /* the scaled variant needs one extra work vector */
   if (scale)
   {
      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize_v2(tmp_vec, hypre_ParCSRMatrixMemoryLocation(A));
   }
   hypre_ParCSRRelax_Cheby_Solve(A, f, ds_data, coefs, order, scale, variant, u,
                                 v, r, orig_u_vec, tmp_vec);

   hypre_TFree(ds_data, hypre_ParCSRMatrixMemoryLocation(A));
   hypre_TFree(coefs, HYPRE_MEMORY_HOST);
   hypre_ParVectorDestroy(orig_u_vec);
   hypre_ParVectorDestroy(tmp_vec);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * CG Smoother: run num_its iterations of an already-set-up PCG solver
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRRelax_CG( HYPRE_Solver        solver,
                      hypre_ParCSRMatrix *A,
                      hypre_ParVector    *f,
                      hypre_ParVector    *u,
                      HYPRE_Int           num_its)
{
   HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */
   HYPRE_PCGSetTol(solver, 0.0); /* zero tolerance: always run all num_its iterations */
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);

#if 0
   {
      HYPRE_Int myid;
      HYPRE_Int num_iterations;
      HYPRE_Real final_res_norm;

      hypre_MPI_Comm_rank(hypre_MPI_COMM_WORLD, &myid);
      HYPRE_PCGGetNumIterations(solver, &num_iterations);
      HYPRE_PCGGetFinalRelativeResidualNorm(solver, &final_res_norm);
      if (myid == 0)
      {
         hypre_printf(" -----CG PCG Iterations = %d\n", num_iterations);
         hypre_printf(" -----CG PCG Final Relative Residual Norm = %e\n", final_res_norm);
      }
   }
#endif

   return hypre_error_flag;
}

/* tql1.f --

  this is the eispack translation - from Barry Smith in Petsc

  Note that this routine always uses real numbers (not complex) even
  if the underlying matrix is Hermitian. This is because the Lanczos
  process applied to Hermitian matrices always produces a real,
  symmetric tridiagonal matrix.
*/

HYPRE_Int
hypre_LINPACKcgtql1(HYPRE_Int *n, HYPRE_Real *d, HYPRE_Real *e, HYPRE_Int *ierr)
{
   /* System generated locals */
   HYPRE_Int  i__1, i__2;
   HYPRE_Real d__1, d__2, c_b10 = 1.0;

   /* Local variables */
   HYPRE_Real c, f, g, h;
   HYPRE_Int  i, j, l, m;
   HYPRE_Real p, r, s, c2, c3 = 0.0;
   HYPRE_Int  l1, l2;
   HYPRE_Real s2 = 0.0;
   HYPRE_Int  ii;
   HYPRE_Real dl1, el1;
   HYPRE_Int  mml;
   HYPRE_Real tst1, tst2;

   /*     THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
   /*     NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
   /*     WILKINSON. */
   /*     HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */

   /*     THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
   /*     TRIDIAGONAL MATRIX BY THE QL METHOD. */

   /*     ON INPUT */

   /*        N IS THE ORDER OF THE MATRIX. */

   /*        D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */

   /*        E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
   /*          IN ITS LAST N-1 POSITIONS.  E(1) IS ARBITRARY. */

   /*      ON OUTPUT */

   /*        D CONTAINS THE EIGENVALUES IN ASCENDING ORDER.  IF AN */
   /*          ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
   /*          ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
   /*          THE SMALLEST EIGENVALUES. */

   /*        E HAS BEEN DESTROYED. */

   /*        IERR IS SET TO */
   /*          ZERO       FOR NORMAL RETURN, */
   /*          J          IF THE J-TH EIGENVALUE HAS NOT BEEN */
   /*                     DETERMINED AFTER 30 ITERATIONS. */

   /*     CALLS CGPTHY FOR  DSQRT(A*A + B*B) . */

   /*     QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
   /*     MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY */

   /*     THIS VERSION DATED AUGUST 1983. */

   /*     ------------------------------------------------------------------ */
   HYPRE_Real ds;

   /* shift to 1-based (Fortran-style) indexing */
   --e;
   --d;

   *ierr = 0;
   if (*n == 1)
   {
      goto L1001;
   }
   i__1 = *n;
   for (i = 2; i <= i__1; ++i)
   {
      e[i - 1] = e[i];
   }
   f = 0.;
   tst1 = 0.;
   e[*n] = 0.;
   i__1 = *n;
   for (l = 1; l <= i__1; ++l)
   {
      j = 0;
      h = (d__1 = d[l], fabs(d__1)) + (d__2 = e[l], fabs(d__2));
      if (tst1 < h)
      {
         tst1 = h;
      }
      /*     .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
      i__2 = *n;
      for (m = l; m <= i__2; ++m)
      {
         tst2 = tst1 + (d__1 = e[m], fabs(d__1));
         if (tst2 == tst1)
         {
            goto L120;
         }
         /*     .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
         /*                THROUGH THE BOTTOM OF THE LOOP .......... */
      }
   L120:
      if (m == l)
      {
         goto L210;
      }
   L130:
      if (j == 30)
      {
         goto L1000;
      }
      ++j;
      /*     .......... FORM SHIFT .......... */
      l1 = l + 1;
      l2 = l1 + 1;
      g = d[l];
      p = (d[l1] - g) / (e[l] * 2.);
      r = hypre_LINPACKcgpthy(&p, &c_b10);
      ds = 1.0;
      if (p < 0.0)
      {
         ds = -1.0;
      }
      d[l] = e[l] / (p + ds * r);
      d[l1] = e[l] * (p + ds * r);
      dl1 = d[l1];
      h = g - d[l];
      if (l2 > *n)
      {
         goto L145;
      }
      i__2 = *n;
      for (i = l2; i <= i__2; ++i)
      {
         d[i] -= h;
      }
   L145:
      f += h;
      /*     .......... QL TRANSFORMATION .......... */
      p = d[m];
      c = 1.;
      c2 = c;
      el1 = e[l1];
      s = 0.;
      mml = m - l;
      /*     .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
      i__2 = mml;
      for (ii = 1; ii <= i__2; ++ii)
      {
         c3 = c2;
         c2 = c;
         s2 = s;
         i = m - ii;
         g = c * e[i];
         h = c * p;
         r = hypre_LINPACKcgpthy(&p, &e[i]);
         e[i + 1] = s * r;
         s = e[i] / r;
         c = p / r;
         p = c * d[i] - s * g;
         d[i + 1] = h + s * (c * g + s * d[i]);
      }
      p = -s * s2 * c3 * el1 * e[l] / dl1;
      e[l] = s * p;
      d[l] = c * p;
      tst2 = tst1 + (d__1 = e[l], fabs(d__1));
      if (tst2 > tst1)
      {
         goto L130;
      }
   L210:
      p = d[l] + f;
      /*     .......... ORDER EIGENVALUES .......... */
      if (l == 1)
      {
         goto L250;
      }
      /*     .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
      i__2 = l;
      for (ii = 2; ii <= i__2; ++ii)
      {
         i = l + 2 - ii;
         if (p >= d[i - 1])
         {
            goto L270;
         }
         d[i] = d[i - 1];
      }
   L250:
      i = 1;
   L270:
      d[i] = p;
   }
   goto L1001;
   /*     .......... SET ERROR -- NO CONVERGENCE TO AN */
   /*                EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
   *ierr = l;
L1001:
   return 0;
} /* cgtql1_ */

HYPRE_Real
hypre_LINPACKcgpthy(HYPRE_Real *a, HYPRE_Real *b)
{
   /* System generated locals */
   HYPRE_Real ret_val, d__1, d__2, d__3;

   /* Local variables */
   HYPRE_Real p, r, s, t, u;

   /*     FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW */

   /* Computing MAX */
   d__1 = fabs(*a), d__2 = fabs(*b);
   p = hypre_max(d__1, d__2);
   if (!p)
   {
      goto L20;
   }
   /* Computing MIN */
   d__2 = fabs(*a), d__3 = fabs(*b);
   /* Computing 2nd power */
   d__1 = hypre_min(d__2, d__3) / p;
   r = d__1 * d__1;
L10:
   t = r + 4.;
   /* iterate until the correction term is negligible */
   if (t == 4.)
   {
      goto L20;
   }
   s = r / t;
   u = s * 2. + 1.;
   p = u * p;
   /* Computing 2nd power */
   d__1 = s / u;
   r = d__1 * d__1 * r;
   goto L10;
L20:
   ret_val = p;

   return ret_val;
} /* cgpthy_ */
/* ===== file: pack_kernel.h ===== */
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#if defined(__ARM_NEON__) || defined(__ARM_NEON)

#include <arm_neon.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "operators/math/math.h"

namespace paddle_mobile {
namespace operators {
namespace math {

// Packs the LHS matrix A (m x k, row stride lda) into `output` in panels of
// 6 rows, 4 columns at a time (transposed 4x4 tiles via vtrn + vswp/vcombine).
// Rows beyond m and columns beyond k are masked/zero-padded.
// NOTE(review): in the remain_m path the zerobuff substitution pointers are
// advanced like the real rows — presumably safe only because at most 3
// scalar-tail reads happen past the 4-float buffer; verify against callers.
void pack_lhs_6r(const int m, const int k, const float *A, const int lda,
                 float *output, const bool unroll) {
  uint32_t mask[8] = {0, 1, 2, 3, 4, 5, 4, 5};
  int remain_k = k & 0x3;
  uint32x4_t vzero = vdupq_n_u32(0);
  // lane mask for the k-tail: lane < remain_k
  uint32x4_t vmask1 = vcltq_u32(vld1q_u32(mask), vdupq_n_u32(remain_k));

  #pragma omp parallel for if (unroll)
  for (int i = 0; i < m - 5; i += 6) {
    const float *a0 = A + i * lda;
    const float *a1 = A + (i + 1) * lda;
    const float *a2 = A + (i + 2) * lda;
    const float *a3 = A + (i + 3) * lda;
    const float *a4 = A + (i + 4) * lda;
    const float *a5 = A + (i + 5) * lda;
    float *out_ptr = output + i * k;

    int loops = k >> 2;
    if (loops > 0) {
#if __aarch64__
      for (int l = 0; l < loops; ++l) {
        float32x4_t _d0 = vld1q_f32(a0);
        float32x4_t _d1 = vld1q_f32(a1);
        float32x4_t _d2 = vld1q_f32(a2);
        float32x4_t _d3 = vld1q_f32(a3);
        float32x4_t _d4 = vld1q_f32(a4);
        float32x4_t _d5 = vld1q_f32(a5);

        // 4x4 transpose of rows 0-3; rows 4-5 interleaved separately
        float32x4x2_t _q0 = vtrnq_f32(_d0, _d1);
        float32x4x2_t _q1 = vtrnq_f32(_d2, _d3);
        float32x4x2_t _q3 = vtrnq_f32(_d4, _d5);
        _d0 = vcombine_f32(vget_low_f32(_q0.val[0]), vget_low_f32(_q1.val[0]));
        _d1 = vcombine_f32(vget_low_f32(_q0.val[1]), vget_low_f32(_q1.val[1]));
        _d2 = vcombine_f32(vget_high_f32(_q0.val[0]), vget_high_f32(_q1.val[0]));
        _d3 = vcombine_f32(vget_high_f32(_q0.val[1]), vget_high_f32(_q1.val[1]));

        vst1q_f32(out_ptr, _d0);
        vst1_f32(out_ptr + 4, vget_low_f32(_q3.val[0]));
        vst1q_f32(out_ptr + 6, _d1);
        vst1_f32(out_ptr + 10, vget_low_f32(_q3.val[1]));
        vst1q_f32(out_ptr + 12, _d2);
        vst1_f32(out_ptr + 16, vget_high_f32(_q3.val[0]));
        vst1q_f32(out_ptr + 18, _d3);
        vst1_f32(out_ptr + 22, vget_high_f32(_q3.val[1]));

        a0 += 4;
        a1 += 4;
        a2 += 4;
        a3 += 4;
        a4 += 4;
        a5 += 4;
        out_ptr += 24;
      }
#else
      // armv7: same 6x4 transpose-and-pack, in one asm loop over `loops`
      asm volatile(
          "loop_4k_%=: \n"
          "vld1.32 {d0-d1}, [%[a0]]! \n"
          "vld1.32 {d2-d3}, [%[a1]]! \n"
          "vld1.32 {d4-d5}, [%[a2]]! \n"
          "vld1.32 {d6-d7}, [%[a3]]! \n"
          "vld1.32 {d8-d9}, [%[a4]]! \n"
          "vld1.32 {d10-d11}, [%[a5]]! \n"
          "vtrn.32 q0, q1 \n"
          "vtrn.32 q2, q3 \n"
          "vtrn.32 q4, q5 \n"
          "vswp.32 d1, d4 \n"
          "vswp.32 d3, d6 \n"

          "vst1.32 {q0}, [%[out]]! \n"
          "vst1.32 {d8}, [%[out]]! \n"
          "vst1.32 {q1}, [%[out]]! \n"
          "vst1.32 {d10}, [%[out]]! \n"
          "vst1.32 {q2}, [%[out]]! \n"
          "vst1.32 {d9}, [%[out]]! \n"
          "vst1.32 {q3}, [%[out]]! \n"
          "vst1.32 {d11}, [%[out]]! \n"

          "subs %[loops], #1 \n"
          "bne loop_4k_%= \n"
          : [out] "+r"(out_ptr), [a0] "+r"(a0), [a1] "+r"(a1), [a2] "+r"(a2),
            [a3] "+r"(a3), [a4] "+r"(a4), [a5] "+r"(a5), [loops] "+r"(loops)
          :
          : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif
    }

    // k-tail: mask out lanes >= remain_k, store only the needed columns
    // (switch cases intentionally fall through: 3 -> 2 -> 1)
    if (remain_k > 0) {
      float32x4_t _d0 = vld1q_f32(a0);
      float32x4_t _d1 = vld1q_f32(a1);
      float32x4_t _d2 = vld1q_f32(a2);
      float32x4_t _d3 = vld1q_f32(a3);
      float32x4_t _d4 = vld1q_f32(a4);
      float32x4_t _d5 = vld1q_f32(a5);

      _d0 = vandq_f32_u32(_d0, vmask1);
      _d1 = vandq_f32_u32(_d1, vmask1);
      _d2 = vandq_f32_u32(_d2, vmask1);
      _d3 = vandq_f32_u32(_d3, vmask1);
      _d4 = vandq_f32_u32(_d4, vmask1);
      _d5 = vandq_f32_u32(_d5, vmask1);

      float32x4x2_t _q0 = vtrnq_f32(_d0, _d1);
      float32x4x2_t _q1 = vtrnq_f32(_d2, _d3);
      float32x4x2_t _q3 = vtrnq_f32(_d4, _d5);
      _d0 = vcombine_f32(vget_low_f32(_q0.val[0]), vget_low_f32(_q1.val[0]));
      _d1 = vcombine_f32(vget_low_f32(_q0.val[1]), vget_low_f32(_q1.val[1]));
      _d2 = vcombine_f32(vget_high_f32(_q0.val[0]), vget_high_f32(_q1.val[0]));

      switch (remain_k) {
        case 3:
          vst1q_f32(out_ptr + 12, _d2);
          vst1_f32(out_ptr + 16, vget_high_f32(_q3.val[0]));
        case 2:
          vst1q_f32(out_ptr + 6, _d1);
          vst1_f32(out_ptr + 10, vget_low_f32(_q3.val[1]));
        case 1:
          vst1q_f32(out_ptr, _d0);
          vst1_f32(out_ptr + 4, vget_low_f32(_q3.val[0]));
        default:
          break;
      }
    }
  }

  // m-tail: fewer than 6 rows left; missing rows are replaced by zerobuff
  // (switch fall-through substitutes all rows past remain_m)
  int remain_m = m % 6;
  if (remain_m) {
    int remain_m_start = m - remain_m;
    const float *a0 = A + remain_m_start * lda;
    const float *a1 = a0 + lda;
    const float *a2 = a0 + 2 * lda;
    const float *a3 = a0 + 3 * lda;
    const float *a4 = a0 + 4 * lda;
    const float *a5 = a0 + 5 * lda;
    float *out_ptr = output + remain_m_start * k;

    uint32x4_t vmask2 = vcltq_u32(vld1q_u32(mask), vdupq_n_u32(remain_m));
    uint32x4_t vmask3 = vcltq_u32(vld1q_u32(mask + 4), vdupq_n_u32(remain_m));
    const float zerobuff[4] = {0.f, 0.f, 0.f, 0.f};

    int lk = 0;
    for (; lk < k - 3; lk += 4) {
      switch (remain_m) {
        case 1:
          a1 = zerobuff;
        case 2:
          a2 = zerobuff;
        case 3:
          a3 = zerobuff;
        case 4:
          a4 = zerobuff;
        case 5:
          a5 = zerobuff;
        default:
          break;
      }
#if __aarch64__
      float32x4_t _d0 = vld1q_f32(a0);
      float32x4_t _d1 = vld1q_f32(a1);
      float32x4_t _d2 = vld1q_f32(a2);
      float32x4_t _d3 = vld1q_f32(a3);
      float32x4_t _d4 = vld1q_f32(a4);
      float32x4_t _d5 = vld1q_f32(a5);

      float32x4x2_t _q0 = vtrnq_f32(_d0, _d1);
      float32x4x2_t _q1 = vtrnq_f32(_d2, _d3);
      float32x4x2_t _q3 = vtrnq_f32(_d4, _d5);
      _d0 = vcombine_f32(vget_low_f32(_q0.val[0]), vget_low_f32(_q1.val[0]));
      _d1 = vcombine_f32(vget_low_f32(_q0.val[1]), vget_low_f32(_q1.val[1]));
      _d2 = vcombine_f32(vget_high_f32(_q0.val[0]), vget_high_f32(_q1.val[0]));
      _d3 = vcombine_f32(vget_high_f32(_q0.val[1]), vget_high_f32(_q1.val[1]));

      // mask out the rows past remain_m
      _d0 = vandq_f32_u32(_d0, vmask2);
      _d1 = vandq_f32_u32(_d1, vmask2);
      _d2 = vandq_f32_u32(_d2, vmask2);
      _d3 = vandq_f32_u32(_d3, vmask2);
      _d4 = vandq_f32_u32(_q3.val[0], vmask3);
      _d5 = vandq_f32_u32(_q3.val[1], vmask3);

      vst1q_f32(out_ptr, _d0);
      vst1_f32(out_ptr + 4, vget_low_f32(_d4));
      vst1q_f32(out_ptr + 6, _d1);
      vst1_f32(out_ptr + 10, vget_low_f32(_d5));
      vst1q_f32(out_ptr + 12, _d2);
      vst1_f32(out_ptr + 16, vget_high_f32(_d4));
      vst1q_f32(out_ptr + 18, _d3);
      vst1_f32(out_ptr + 22, vget_high_f32(_d5));

      a0 += 4;
      a1 += 4;
      a2 += 4;
      a3 += 4;
      a4 += 4;
      a5 += 4;
      out_ptr += 24;
#else
      asm volatile(
          "vld1.32 {d0-d1}, [%[a0]]! \n"
          "vld1.32 {d2-d3}, [%[a1]]! \n"
          "vld1.32 {d4-d5}, [%[a2]]! \n"
          "vld1.32 {d6-d7}, [%[a3]]! \n"
          "vld1.32 {d8-d9}, [%[a4]]! \n"
          "vld1.32 {d10-d11}, [%[a5]]! \n"
          "vtrn.32 q0, q1 \n"
          "vtrn.32 q2, q3 \n"
          "vtrn.32 q4, q5 \n"
          "vswp.32 d1, d4 \n"
          "vswp.32 d3, d6 \n"

          "vbif q0, %q[vzero], %q[vmask2] \n"
          "vbif q1, %q[vzero], %q[vmask2] \n"
          "vbif q2, %q[vzero], %q[vmask2] \n"
          "vbif q3, %q[vzero], %q[vmask2] \n"
          "vbif q4, %q[vzero], %q[vmask3] \n"
          "vbif q5, %q[vzero], %q[vmask3] \n"

          "vst1.32 {q0}, [%[out]]! \n"
          "vst1.32 {d8}, [%[out]]! \n"
          "vst1.32 {q1}, [%[out]]! \n"
          "vst1.32 {d10}, [%[out]]! \n"
          "vst1.32 {q2}, [%[out]]! \n"
          "vst1.32 {d9}, [%[out]]! \n"
          "vst1.32 {q3}, [%[out]]! \n"
          "vst1.32 {d11}, [%[out]]! \n"
          : [out] "+r"(out_ptr), [a0] "+r"(a0), [a1] "+r"(a1), [a2] "+r"(a2),
            [a3] "+r"(a3), [a4] "+r"(a4), [a5] "+r"(a5)
          : [vmask2] "w"(vmask2), [vmask3] "w"(vmask3), [vzero] "w"(vzero)
          : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5");
#endif
    }
    // remain k
    switch (remain_m) {
      case 1:
        a1 = zerobuff;
      case 2:
        a2 = zerobuff;
      case 3:
        a3 = zerobuff;
      case 4:
        a4 = zerobuff;
      case 5:
        a5 = zerobuff;
      default:
        break;
    }
    // scalar tail: interleave the last (k % 4) columns row-by-row
    for (; lk < k; ++lk) {
      *out_ptr++ = *a0++;
      *out_ptr++ = *a1++;
      *out_ptr++ = *a2++;
      *out_ptr++ = *a3++;
      *out_ptr++ = *a4++;
      *out_ptr++ = *a5++;
    }
  }
}

#if __aarch64__
// Packs the RHS matrix B (k x n, row stride ldb) into `output` in panels of
// 16 columns, processing 4 rows of B per outer iteration (aarch64 asm).
// The n-tail is masked to zero and the unused half-panel is zero-filled so
// the consuming kernel can always read full 16-wide panels.
void pack_rhs_16c(int k, int n, const float *B, int ldb, float *output,
                  const bool unroll) {
  uint32_t mask[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t remain_n = n & 0x7;
  float32x4_t vzero = vdupq_n_f32(0.f);
  uint32x4_t vmask1 = vcltq_u32(vld1q_u32(mask), vdupq_n_u32(remain_n));
  uint32x4_t vmask2 = vcltq_u32(vld1q_u32(mask + 4), vdupq_n_u32(remain_n));

  #pragma omp parallel for if (unroll)
  for (int i = 0; i < k - 3; i += 4) {
    const float *b0 = B + i * ldb;
    const float *b1 = b0 + ldb;
    const float *b2 = b1 + ldb;
    const float *b3 = b2 + ldb;
    int j = 0;
    asm volatile(
        "prfm pldl1keep, [%[b0]] \n"
        "prfm pldl1keep, [%[b1]] \n"
        "prfm pldl1keep, [%[b2]] \n"
        "prfm pldl1keep, [%[b3]] \n"
        :
        : [b0] "r"(b0), [b1] "r"(b1), [b2] "r"(b2), [b3] "r"(b3));
    // full 16-column panels
    for (; j < n - 15; j += 16) {
      float *out_ptr0 = output + j * k + 16 * i;
      asm volatile(
          "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[b0]], #64 \n"
          "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[b1]], #64 \n"
          "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[out_ptr0]], #64 \n"
          "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[out_ptr0]], #64 \n"
          "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[b2]], #64 \n"
          "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[b3]], #64 \n"
          "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[out_ptr0]], #64 \n"
          "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%[out_ptr0]], #64 \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0), [b1] "+r"(b1),
            [b2] "+r"(b2), [b3] "+r"(b3)
          :
          : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
    }
    // full 8-column half-panels (strided stores inside the 16-wide panel)
    for (; j < n - 7; j += 8) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      int step = 64;
      asm volatile(
          "ld1 {v0.4s, v1.4s}, [%[b0]], #32 \n"
          "ld1 {v2.4s, v3.4s}, [%[b1]], #32 \n"
          "ld1 {v4.4s, v5.4s}, [%[b2]], #32 \n"
          "ld1 {v6.4s, v7.4s}, [%[b3]], #32 \n"
          "st1 {v0.4s, v1.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v2.4s, v3.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v4.4s, v5.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v6.4s, v7.4s}, [%[out_ptr0]], %[step] \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0), [b1] "+r"(b1),
            [b2] "+r"(b2), [b3] "+r"(b3)
          : [step] "r"(step)
          : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
    }
    // n-tail (< 8 columns): mask loads, then advance j as if a full 8 done
    if (j < n) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      int step = 64;
      asm volatile(
          "ld1 {v0.4s, v1.4s}, [%[b0]] \n"
          "ld1 {v2.4s, v3.4s}, [%[b1]] \n"
          "ld1 {v4.4s, v5.4s}, [%[b2]] \n"
          "ld1 {v6.4s, v7.4s}, [%[b3]] \n"
          "and v0.16b, v0.16b, %[vmask1].16b \n"
          "and v1.16b, v1.16b, %[vmask2].16b \n"
          "and v2.16b, v2.16b, %[vmask1].16b \n"
          "and v3.16b, v3.16b, %[vmask2].16b \n"
          "and v4.16b, v4.16b, %[vmask1].16b \n"
          "and v5.16b, v5.16b, %[vmask2].16b \n"
          "and v6.16b, v6.16b, %[vmask1].16b \n"
          "and v7.16b, v7.16b, %[vmask2].16b \n"
          "st1 {v0.4s, v1.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v2.4s, v3.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v4.4s, v5.4s}, [%[out_ptr0]], %[step] \n"
          "st1 {v6.4s, v7.4s}, [%[out_ptr0]], %[step] \n"
          : [out_ptr0] "+r"(out_ptr0)
          : [vmask1] "w"(vmask1), [vmask2] "w"(vmask2), [b0] "r"(b0),
            [b1] "r"(b1), [b2] "r"(b2), [b3] "r"(b3), [step] "r"(step)
          : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
      j += 8;
    }
    // zero the other half of a partially-used 16-wide panel (4 rows)
    if (j & 0xf) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      vst1q_f32(out_ptr0, vzero);
      vst1q_f32(out_ptr0 + 4, vzero);
      out_ptr0 += 16;
      vst1q_f32(out_ptr0, vzero);
      vst1q_f32(out_ptr0 + 4, vzero);
      out_ptr0 += 16;
      vst1q_f32(out_ptr0, vzero);
      vst1q_f32(out_ptr0 + 4, vzero);
      out_ptr0 += 16;
      vst1q_f32(out_ptr0, vzero);
      vst1q_f32(out_ptr0 + 4, vzero);
    }
  }
  // remain k
  for (int i = (k & 0xFFFFFFFC); i < k; ++i) {
    const float *b0 = B + i * ldb;
    int j = 0;
    asm volatile("prfm pldl1keep, [%[b0]] \n" : : [b0] "r"(b0));
    for (; j < n - 15; j += 16) {
      float *out_ptr0 = output + j * k + 16 * i;
      asm volatile(
          "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[b0]], #64 \n"
          "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%[out_ptr0]], #64 \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0)
          :
          : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
    }
    for (; j < n - 7; j += 8) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      int step = 64;
      asm volatile(
          "ld1 {v0.4s, v1.4s}, [%[b0]], #32 \n"
          "st1 {v0.4s, v1.4s}, [%[out_ptr0]], %[step] \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0)
          : [step] "r"(step)
          : "memory", "v0", "v1");
    }
    if (j < n) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      asm volatile(
          "ld1 {v0.4s, v1.4s}, [%[b0]] \n"
          "and v0.16b, v0.16b, %[vmask1].16b \n"
          "and v1.16b, v1.16b, %[vmask2].16b \n"
          "st1 {v0.4s, v1.4s}, [%[out_ptr0]] \n"
          : [out_ptr0] "+r"(out_ptr0)
          : [vmask1] "w"(vmask1), [vmask2] "w"(vmask2), [b0] "r"(b0)
          : "memory", "v0", "v1");
      j += 8;
    }
    if (j & 0xf) {
      float *out_ptr0 = output + (j & 0xFFFFFFF0) * k + 16 * i + (j & 0xF);
      vst1q_f32(out_ptr0, vzero);
      vst1q_f32(out_ptr0 + 4, vzero);
    }
  }
}
#else
// Packs the RHS matrix B (k x n, row stride ldb) into `output` in panels of
// 8 columns, processing 4 rows of B per outer iteration (armv7 asm).
// The n-tail is masked to zero via vmask1/vmask2.
void pack_rhs_8c(int k, int n, const float *B, int ldb, float *output,
                 const bool unroll) {
  uint32_t mask[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t remain_n = n & 0x7;
  uint32x4_t vmask1 = vcltq_u32(vld1q_u32(mask), vdupq_n_u32(remain_n));
  uint32x4_t vmask2 = vcltq_u32(vld1q_u32(mask + 4), vdupq_n_u32(remain_n));

  #pragma omp parallel for if (unroll)
  for (int i = 0; i < k - 3; i += 4) {
    const float *b0 = B + i * ldb;
    const float *b1 = b0 + ldb;
    const float *b2 = b1 + ldb;
    const float *b3 = b2 + ldb;
    int j = 0;
    // two 8-column panels at once
    for (; j < n - 15; j += 16) {
      float *out_ptr0 = output + j * k + 8 * i;
      float *out_ptr1 = out_ptr0 + 8 * k;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]]! \n"
          "vld1.32 {q2, q3}, [%[b1]]! \n"
          "vld1.32 {q4, q5}, [%[b0]]! \n"
          "vld1.32 {q6, q7}, [%[b1]]! \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          "vst1.32 {q2, q3}, [%[out_ptr0]]! \n"
          "vst1.32 {q4, q5}, [%[out_ptr1]]! \n"
          "vst1.32 {q6, q7}, [%[out_ptr1]]! \n"

          "vld1.32 {q0, q1}, [%[b2]]! \n"
          "vld1.32 {q2, q3}, [%[b3]]! \n"
          "vld1.32 {q4, q5}, [%[b2]]! \n"
          "vld1.32 {q6, q7}, [%[b3]]! \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          "vst1.32 {q2, q3}, [%[out_ptr0]]! \n"
          "vst1.32 {q4, q5}, [%[out_ptr1]]! \n"
          "vst1.32 {q6, q7}, [%[out_ptr1]]! \n"
          : [out_ptr0] "+r"(out_ptr0), [out_ptr1] "+r"(out_ptr1),
            [b0] "+r"(b0), [b1] "+r"(b1), [b2] "+r"(b2), [b3] "+r"(b3)
          :
          : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
    }
    // single 8-column panel
    for (; j < n - 7; j += 8) {
      float *out_ptr0 = output + j * k + 8 * i;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]]! \n"
          "vld1.32 {q2, q3}, [%[b1]]! \n"
          "vld1.32 {q4, q5}, [%[b2]]! \n"
          "vld1.32 {q6, q7}, [%[b3]]! \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          "vst1.32 {q2, q3}, [%[out_ptr0]]! \n"
          "vst1.32 {q4, q5}, [%[out_ptr0]]! \n"
          "vst1.32 {q6, q7}, [%[out_ptr0]]! \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0), [b1] "+r"(b1),
            [b2] "+r"(b2), [b3] "+r"(b3)
          :
          : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
    }
    // n-tail: masked loads
    if (j < n) {
      float *out_ptr0 = output + j * k + 8 * i;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]] \n"
          "vld1.32 {q2, q3}, [%[b1]] \n"
          "vld1.32 {q4, q5}, [%[b2]] \n"
          "vld1.32 {q6, q7}, [%[b3]] \n"
          "vand q0, q0, %q[vmask1] \n"
          "vand q1, q1, %q[vmask2] \n"
          "vand q2, q2, %q[vmask1] \n"
          "vand q3, q3, %q[vmask2] \n"
          "vand q4, q4, %q[vmask1] \n"
          "vand q5, q5, %q[vmask2] \n"
          "vand q6, q6, %q[vmask1] \n"
          "vand q7, q7, %q[vmask2] \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          "vst1.32 {q2, q3}, [%[out_ptr0]]! \n"
          "vst1.32 {q4, q5}, [%[out_ptr0]]! \n"
          "vst1.32 {q6, q7}, [%[out_ptr0]]! \n"
          : [out_ptr0] "+r"(out_ptr0)
          : [vmask1] "w"(vmask1), [vmask2] "w"(vmask2), [b0] "r"(b0),
            [b1] "r"(b1), [b2] "r"(b2), [b3] "r"(b3)
          : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
    }
  }
  // remain k
  for (int i = (k & 0xFFFFFFFC); i < k; ++i) {
    const float *b0 = B + i * ldb;
    int j = 0;
    for (; j < n - 15; j += 16) {
      float *out_ptr0 = output + j * k + 8 * i;
      float *out_ptr1 = out_ptr0 + 8 * k;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]]! \n"
          "vld1.32 {q2, q3}, [%[b0]]! \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          "vst1.32 {q2, q3}, [%[out_ptr1]]! \n"
          : [out_ptr0] "+r"(out_ptr0), [out_ptr1] "+r"(out_ptr1), [b0] "+r"(b0)
          :
          : "memory", "q0", "q1", "q2", "q3");
    }
    for (; j < n - 7; j += 8) {
      float *out_ptr0 = output + j * k + 8 * i;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]]! \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]]! \n"
          : [out_ptr0] "+r"(out_ptr0), [b0] "+r"(b0)
          :
          : "memory", "q0", "q1");
    }
    if (j < n) {
      float *out_ptr0 = output + j * k + 8 * i;
      asm volatile(
          "vld1.32 {q0, q1}, [%[b0]] \n"
          "vand q0, q0, %q[vmask1] \n"
          "vand q1, q1, %q[vmask2] \n"
          "vst1.32 {q0, q1}, [%[out_ptr0]] \n"
          : [out_ptr0] "+r"(out_ptr0)
          : [vmask1] "w"(vmask1), [vmask2] "w"(vmask2), [b0] "r"(b0)
          : "memory", "q0", "q1");
    }
  }
}
#endif  // __aarch64__

// C = alpha * c + beta * C, elementwise over an mc x nc tile
// (c has row stride ldc1, C has row stride ldc2).
// NOTE(review): definition is truncated at the end of this chunk; the
// remaining _nc1 switch cases continue past the visible source.
void write_back_alpha_beta(const int mc, const int nc, const float alpha,
                           const float *c, const int ldc1, const float beta,
                           float *C, const int ldc2) {
  int nc1 = nc / 4;
  int _nc1 = nc % 4;

  float32x4_t _alpha = vdupq_n_f32(alpha);
  float32x4_t _beta = vdupq_n_f32(beta);
  float32x4_t cv, cv2;
  for (int i = 0; i < mc; ++i) {
    const float *c_ptr = c + i * ldc1;
    float *C_ptr = C + i * ldc2;
    for (int j = 0; j < nc1; ++j) {
      cv = vld1q_f32(c_ptr);
      cv = vmulq_f32(_alpha, cv);
      cv2 = vld1q_f32(C_ptr);
      cv = vmlaq_f32(cv, _beta, cv2);
      vst1q_f32(C_ptr, cv);
      c_ptr += 4;
      C_ptr += 4;
    }
    if (_nc1 != 0) {
      cv = vld1q_f32(c_ptr);
      cv = vmulq_f32(_alpha, cv);
      cv2 = vld1q_f32(C_ptr);
      cv = vmlaq_f32(cv, _beta, cv2);
      switch (_nc1) {
        case 3:
          vst1q_lane_f32(C_ptr + 2, cv, 2);
        case 2:
vst1_f32(C_ptr, vget_low_f32(cv)); break; case 1: vst1q_lane_f32(C_ptr, cv, 0); break; } } } } #if __aarch64__ void write_back_alpha1_beta0(const int mc, const int nc, const float *c, const int ldc1, float *C, const int ldc2) { int nc1 = nc / 4; int _nc1 = nc % 4; const float *c_ptr; float *C_ptr; float32x4_t cv; for (int i = 0; i < mc; ++i) { c_ptr = c + i * ldc1; C_ptr = C + i * ldc2; for (int j = 0; j < nc1; ++j) { cv = vld1q_f32(c_ptr); vst1q_f32(C_ptr, cv); c_ptr += 4; C_ptr += 4; } if (_nc1 != 0) { cv = vld1q_f32(c_ptr); switch (_nc1) { case 3: vst1q_lane_f32(C_ptr + 2, cv, 2); case 2: vst1_f32(C_ptr, vget_low_f32(cv)); break; case 1: vst1q_lane_f32(C_ptr, cv, 0); break; } } } } void write_back_alpha1_beta1(const int mc, const int nc, const float *c, const int ldc1, float *C, const int ldc2) { int nc1 = nc / 4; int _nc1 = nc % 4; const float *c_ptr; float *C_ptr; float32x4_t cv, cv2; for (int i = 0; i < mc; ++i) { c_ptr = c + i * ldc1; C_ptr = C + i * ldc2; for (int j = 0; j < nc1; ++j) { cv = vld1q_f32(c_ptr); cv2 = vld1q_f32(C_ptr); cv = vaddq_f32(cv, cv2); vst1q_f32(C_ptr, cv); c_ptr += 4; C_ptr += 4; } if (_nc1 != 0) { cv = vld1q_f32(c_ptr); cv2 = vld1q_f32(C_ptr); cv = vaddq_f32(cv, cv2); switch (_nc1) { case 3: vst1q_lane_f32(C_ptr + 2, cv, 2); case 2: vst1_f32(C_ptr, vget_low_f32(cv)); break; case 1: vst1q_lane_f32(C_ptr, cv, 0); break; } } } } #else void write_back_alpha1_beta0(const int mc, const int nc, const float *c, const int ldc1, float *C, const int ldc2) { int nc1 = nc / 16; int nc2 = nc % 16; int step1 = 4 * (ldc1 - 16 * nc1); int step2 = 4 * ldc2; int volatile m = mc; const float *volatile c_ptr = c; float *volatile C_ptr = C; if (nc1 > 0) { asm volatile( "subs %[mc], %[mc], #1 \n\t" "blt end_mc_%= \n\t" "loop_mc_%=: \n\t" "mov r6, %[C_ptr] \n\t" "mov r5, %[nc1] \n\t" "subs r5, r5, #1 \n\t" "blt end_nc1_%= \n\t" "loop_nc1_%=: \n\t" "vld1.32 {q0, q1}, [%[c_ptr]]! \n\t" "vst1.32 {q0, q1}, [r6]! \n\t" "vld1.32 {q2, q3}, [%[c_ptr]]! 
\n\t" "vst1.32 {q2, q3}, [r6]! \n\t" "subs r5, r5, #1 \n\t" "bge loop_nc1_%= \n\t" "end_nc1_%=: \n\t" "add %[c_ptr], %[c_ptr], %[step1] \n\t" "add %[C_ptr], %[C_ptr], %[step2] \n\t" "subs %[mc], %[mc], #1 \n\t" "bge loop_mc_%= \n\t" "end_mc_%=: \n\t" : : [C_ptr] "r"(C_ptr), [c_ptr] "r"(c_ptr), [mc] "r"(m), [nc1] "r"(nc1), [step1] "r"(step1), [step2] "r"(step2) : "memory", "r5", "r6", "q0", "q1", "q2", "q3"); } if (nc2 != 0) { for (int i = 0; i < mc; i++) { const float *c0 = c_ptr + nc1 * 16 + i * ldc1; float *C0 = C_ptr + nc1 * 16 + i * ldc2; for (int j = 0; j < nc2; j++) { *C0++ = *c0++; } } } } void write_back_alpha1_beta1(const int mc, const int nc, const float *c, const int ldc1, float *C, const int ldc2) { int nc1 = nc / 16; int nc2 = nc % 16; int step1 = 4 * (ldc1 - 16 * nc1); int step2 = 4 * ldc2; int volatile m = mc; const float *volatile c_ptr = c; float *volatile C_ptr = C; if (nc1 > 0) { asm volatile( "subs %[mc], %[mc], #1 \n\t" "blt end_mc_%= \n\t" "loop_mc_%=: \n\t" "mov r6, %[C_ptr] \n\t" "mov r5, %[nc1] \n\t" "subs r5, r5, #1 \n\t" "blt end_nc1_%= \n\t" "loop_nc1_%=: \n\t" "vld1.32 {q0, q1}, [%[c_ptr]]! \n\t" "vld1.32 {q2, q3}, [r6] \n\t" "vadd.f32 q0, q0, q2 \n\t" "vadd.f32 q1, q1, q3 \n\t" "vst1.32 {q0, q1}, [r6]! \n\t" "vld1.32 {q0, q1}, [%[c_ptr]]! \n\t" "vld1.32 {q2, q3}, [r6] \n\t" "vadd.f32 q0, q0, q2 \n\t" "vadd.f32 q1, q1, q3 \n\t" "vst1.32 {q0, q1}, [r6]! 
\n\t" "subs r5, r5, #1 \n\t" "bge loop_nc1_%= \n\t" "end_nc1_%=: \n\t" "add %[c_ptr], %[c_ptr], %[step1] \n\t" "add %[C_ptr], %[C_ptr], %[step2] \n\t" "subs %[mc], %[mc], #1 \n\t" "bge loop_mc_%= \n\t" "end_mc_%=: \n\t" : : [C_ptr] "r"(C_ptr), [c_ptr] "r"(c_ptr), [mc] "r"(m), [nc1] "r"(nc1), [step1] "r"(step1), [step2] "r"(step2) : "memory", "r5", "r6", "q0", "q1", "q2", "q3"); } if (nc2 != 0) { for (int i = 0; i < mc; i++) { const float *c0 = c_ptr + nc1 * 16 + i * ldc1; float *C0 = C_ptr + nc1 * 16 + i * ldc2; for (int j = 0; j < nc2; j++) { *C0++ += *c0++; } } } } #endif // __aarch64__ void write_back(const int mc, const int nc, const float alpha, const float *c, const int ldc1, const float beta, float *C, const int ldc2) { if (alpha == 1.f && beta == 0.f) { write_back_alpha1_beta0(mc, nc, c, ldc1, C, ldc2); } else if (alpha == 1.f && beta == 1.f) { write_back_alpha1_beta1(mc, nc, c, ldc1, C, ldc2); } else { write_back_alpha_beta(mc, nc, alpha, c, ldc1, beta, C, ldc2); } } } // namespace math } // namespace operators } // namespace paddle_mobile #endif // __ARM_NEON__
temp.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Lock-contention micro-benchmark: each thread scans a slice of a random
 * vector and updates a shared maximum under one global OpenMP lock.  The
 * nested IF chain below performs an *unlocked* pre-check of the shared
 * `max` so the lock is only taken for candidate values; this read is
 * deliberately racy — the locked re-check keeps the final result correct.
 */
#define IF if (v[i] > max)
#define NUM_EXEC 5 /* number of timed executions that are averaged */

/* Fill v[0..N-1] with pseudo-random values; the seed is fixed so every
 * run (and every thread count) sees exactly the same data. */
static void populate_vector(size_t N, int v[]) {
    size_t i;
    srand(1337);
    for (i = 0; i < N; ++i)
        v[i] = rand();
}

/* Run one timed max-reduction over v[0..N-1] with T threads.
 * Returns the elapsed wall-clock time in seconds. */
static double contention_test(size_t N, int T, const int v[]) {
    size_t i;
    int max = -1;
    double t0, t1;
    omp_lock_t mutex;
    omp_init_lock(&mutex);
    t0 = omp_get_wtime();
    #pragma omp parallel for private(i) num_threads(T)
    for (i = 0; i < N; ++i) {
        /*--*/ IF IF IF IF IF IF /*--*/
        {
            omp_set_lock(&mutex);
            { if (v[i] > max) max = v[i]; }
            omp_unset_lock(&mutex);
        }
    }
    t1 = omp_get_wtime();
    omp_destroy_lock(&mutex); /* fix: the lock was previously leaked */
    return (t1 - t0);
}

/* Arithmetic mean of v[0..n-1]. */
static double avg(int n, const double v[]) {
    int i;
    double sum = 0.;
    for (i = 0; i < n; ++i)
        sum += v[i];
    return sum / n;
}

int main(int argc, char* argv[]) {
    static double times[NUM_EXEC];
    int* vector = NULL;
    size_t N;
    int T, i;
    if (argc != 4) {
        /* fix: the program takes three arguments, not two; report on
         * stderr so stdout stays clean for the timing output below. */
        fprintf(stderr,
                "Usage: %s <vector_size> <number_of_threads> <run_index>\n",
                argv[0]);
        return 1;
    }
    N = (size_t) atoll(argv[1]);
    T = atoi(argv[2]);
    /* run_index 9 marks the last run of a sweep: print '\n' instead of ','
     * so the driver script gets one comma-separated line of averages. */
    int last = atoi(argv[3]) == 9;
    if (N == 0 || T <= 0) {
        fprintf(stderr, "vector_size and number_of_threads must be positive.\n");
        return 1;
    }
    vector = malloc(N * sizeof *vector);
    if (!vector) {
        fprintf(stderr, "Failed to allocate memory. Exiting...\n");
        return 2;
    }
    populate_vector(N, vector);
    /* Warm-up: throw away the first execution (thread-pool spin-up, cold
     * caches); the loop below overwrites times[0] with a measured run. */
    times[0] = contention_test(N, T, vector);
    for (i = 0; i < NUM_EXEC; ++i)
        times[i] = contention_test(N, T, vector);
    if (!last)
        fprintf(stdout, "%lf,", avg(NUM_EXEC, times));
    else
        fprintf(stdout, "%lf\n", avg(NUM_EXEC, times));
    free(vector);
    return 0;
}
LAGraph_bfs_pushpull.c
//------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: push-pull breadth-first search //------------------------------------------------------------------------------ /* LAGraph: graph algorithms based on GraphBLAS Copyright 2020 LAGraph Contributors. (see Contributors.txt for a full list of Contributors; see ContributionInstructions.txt for information on how you can Contribute to this project). All Rights Reserved. NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT. Released under a BSD license, please see the LICENSE file distributed with this Software or contact permission@sei.cmu.edu for full terms. Created, in part, with funding and support from the United States Government. (see Acknowledgments.txt file). This program includes and/or can make use of certain third party source code, object code, documentation and other files ("Third Party Software"). See LICENSE file for more details. */ //------------------------------------------------------------------------------ // LAGraph_bfs_pushpull: direction-optimized push/pull breadth first search, // contributed by Tim Davis, Texas A&M. // LAGraph_bfs_pushpull computes the BFS of a graph from a single given // source node. The result is a vector v where v(i)=k if node i was placed // at level k in the BFS. // Usage: // info = LAGraph_bfs_pushpull (&v, &pi, A, AT, source, max_level, vsparse) ; // GrB_Vector *v: a vector containing the result, created on output. // v(i) = k is the BFS level of node i in the graph, where a source // node has v(source)=1. 
v(i) is implicitly zero if it is unreachable // from the source node. That is, GrB_Vector_nvals (&nreach,v) is the // size of the reachable set of the source node, for a single-source // BFS. v may be returned as sparse, or full. If full, v(i)=0 // indicates that node i was not reached. If sparse, the pattern of v // indicates the set of nodes reached. // GrB_Vector *pi: a vector containing the BFS tree, in 1-based indexing. // pi(source) = source+1 for source node. pi(i) = p+1 if p is the // parent of i. If pi is sparse, and pi(i) is not present, then node // i has not been reached. Otherwise, if pi is full, then pi(i)=0 // indicates that node i was not reached. // GrB_Matrix A: a square matrix of any type. The values of A are not // accessed. The presence of the entry A(i,j) indicates the edge // (i,j). That is, an explicit entry A(i,j)=0 is treated as an edge. // GrB_Matrix AT: an optional matrix of any type. If NULL, the algorithm // is a conventional push-only BFS. If not NULL, AT must be the // transpose of A, and a push-pull algorithm is used (NOTE: this // assumes GraphBLAS stores its matrix in CSR form; see discussion // below). Results are undefined if AT is not NULL but not identical // to the transpose of A. // int64_t source: the source node for the BFS. // int64_t max_level: An optional limit on the levels searched for the // single-source BFS. If zero, then no limit is enforced. If > 0, // then only nodes with v(i) <= max_level will be visited. That is: // 1: just the source node, 2: the source and its neighbors, 3: the // source node, its neighbors, and their neighbors, etc. // bool vsparse: if the result v may remain very sparse, then set this // parameter to true. If v might have many entries, set it false. If // you are unsure, then set it to true. This parameter speeds up // the handling of v. If you guess wrong, there is a slight // performance penalty. The results are not affected by this // parameter, just the performance. 
This parameter is used only for // the single-source BFS. // single-source BFS: // Given a graph A, a source node, find all nodes reachable from the // source node. v(source)=1, v(i)=2 if edge (source,i) appears in the // graph, and so on. If node i is not reachable from source, then // implicitly v(i)=0. v is returned as a sparse vector, and v(i) is not // an entry in this vector. // This algorithm can use the push-pull strategy, which requires both A and // AT=A' to be passed in. If the graph is known to be symmetric, then the same // matrix A can be passed in for both arguments. Results are undefined if AT // is not the transpose of A. // If only A or AT is passed in, then only single strategy will be used: push // or pull, but not both. In general, push-only performs well. A pull-only // strategy is possible but it is exceedingly slow. Assuming A and AT are both // in CSR format, then (let s = source node): // LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest) // LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // push-only (good) // LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // pull-only (slow!) // If A and AT are both in CSC format, then: // LAGraph_bfs_pushpull (..., A, AT, s, ...) ; // push-pull (fastest) // LAGraph_bfs_pushpull (..., NULL, AT, s, ...) ; // push-only (good) // LAGraph_bfs_pushpull (..., A, NULL, s, ...) ; // pull-only (slow!) // Since the pull-only method is exceedingly slow, SuiteSparse:GraphBLAS // detects this case and refuses to do it. // The basic step of this algorithm computes A'*q where q is the 'queue' of // nodes in the current level. This can be done with GrB_vxm(q,A) = (q'*A)' = // A'*q, or by GrB_mxv(AT,q) = AT*q = A'*q. Both steps compute the same thing, // just in a different way. In GraphBLAS, unlike MATLAB, a GrB_Vector is // simultaneously a row and column vector, so q and q' are interchangeable. 
// To implement an efficient BFS using GraphBLAS, an assumption must be made in // LAGraph about how the matrix is stored, whether by row or by column (or // perhaps some other opaque data structure). The storage format has a huge // impact on the relative performance of vxm(q,A) and mxv(AT,q). // Storing A by row, if A(i,j) is the edge (i,j), means that A(i,:) is easily // accessible. In terms of the graph A, this means that the out-adjacency // list of node i can be traversed in time O(out-degree of node i). // If AT is stored by row, then AT(i,:) is the in-adjacency list of node i, // and traversing row i of AT can be done in O(in-degree of node i) time. // The CSR (Compressed Sparse Row) format is the default for // SuiteSparse:GraphBLAS, but no assumption can be made about any particular // GraphBLAS library implementation. // If A and AT are both stored by column instead, then A(i,:) is not easy to // access. Instead, A(:,i) is the easily-accessible in-adjacency of node i, // and AT(:,i) is the out-adjancency. // A push step requires the out-adjacencies of each node, where as // a pull step requires the in-adjacencies of each node. // vxm(q,A) = A'*q, with A stored by row: a push step // mxv(AT,q) = A'*q, with AT stored by row: a pull step // vxm(q,A) = A'*q, with A stored by col: a pull step // mxv(AT,q) = A'*q, with AT stored by col: a push step // The GraphBLAS data structure is opaque. An implementation may decide to // store the matrix A in both formats, internally, so that it easily traverse // both in- and out-adjacencies of each node (equivalently, A(i,:) and A(:,i) // can both be easily traversed). This would make a push-pull BFS easy to // implement using just the opaque GrB_Matrix A, but it doubles the storage. // Deciding which format to use automatically is not a simple task, // particularly since the decision must work well throughout GraphBLAS, not // just for the BFS. 
// MATLAB stores its sparse matrices in CSC format (Compressed Sparse Column). // As a result, the MATLAB expression x=AT*q is a push step, computed using a // saxpy-based algorithm internally, and x=A'*q is a pull step, computed using // a dot product. // SuiteSparse:GraphBLAS can store a matrix in either format, but this requires // an extension to the GraphBLAS C API (GxB_set (A, GxB_FORMAT, f)). where // f = GxB_BY_ROW (that is, CSR) or GxB_BY_COL (that is, CSC). The library // could be augmented in the future with f = Gxb_BY_BOTH. It currently does // not select the format automatically. As a result, if GxB_set is not used, // all its GrB_Matrix objects are stored by row (CSR). // SuiteSparse:GraphBLAS allows the user to query (via GxB_get) an set (via // GxB_set) the format, whether by row or by column. The hypersparsity of // A is selected automatically, with optional hints from the user application, // but a selection between hypersparsity vs standard CSR and CSC has no effect // on the push vs pull decision made here. // The push/pull and saxpy/dot connection can be described as follows. // Assume for these first two examples that MATLAB stores its matrices in CSR // format, where accessing A(i,:) is fast. // If A is stored by row, then x = vxm(q,A) = q'*A can be written in MATLAB // notation as: /* function x = vxm (q,A) % a push step: compute x = q'*A where q is a column vector x = sparse (1,n) for i = 1:n % a saxpy operation, using the ith row of A and the scalar q(i) x = x + q (i) * A (i,:) end */ // If AT is stored by row, then x = mvx(AT,q) = AT*q = A'*q becomes // a dot product: /* function x = mxv (AT,q) % a pull step: compute x = AT*q where q is a column vector for i = 1:n % a dot-product of the ith row of AT and the column vector q x (i) = AT (i,:) * q end */ // The above snippets describe how SuiteSparse:GraphBLAS computes vxm(q,A) and // mxv(AT,q) by default, where A and AT are stored by row by default. 
However, // they would be very slow in MATLAB, since it stores its sparse matrices in // CSC format. In that case, if A is stored by column and thus accessing // A(:,j) is efficient, then x = vxm(q,A) = q'*A becomes the dot product // instead. These two snippets assume the matrices are both in CSR for, and // thus make more efficient use of MATLAB: /* function x = vxm (q,A) % a pull step: compute x = q'*A where q is a column vector for j = 1:n % a dot product of the row vector q' and the jth column of A x (j) = q' * A (:,j) end */ // If AT is stored by column, then x = mvx(AT,q) is /* function x = mxv (AT,q) % a push step: compute x = AT*q where q is a column vector for j = 1:n % a saxpy operation, using the jth column of AT and the scalar q(i) x = x + AT (:,j) * q end */ // In MATLAB, if q is a sparse column vector and A is a sparse matrix, then // x=A*q does in fact use a saxpy-based method, internally, and x=A'*q uses a // dot product. You can view the code used internally in MATLAB for its sparse // matrix multiplication in the SuiteSparse/MATLAB_Tools/SSMULT and SFMULT // packages, at http://suitesparse.com. // This raises an interesting puzzle for LAGraph, which is intended on being a // graph library that can be run on any implementation of GraphBLAS. There are // no mechanisms in the GraphBLAS C API for LAGraph (or other external packages // or user applications) to provide hints to GraphBLAS. Likely, there are no // query mechanisms where LAGraph can ask GraphBLAS how its matrices might be // stored (LAGraphs asks, "Is A(i,:) fast? Or A(:,j)? Or both?"; the answer // from GraphBLAS is silence). The GraphBLAS data structure is opaque, and it // does not answer this query. // There are two solutions to this puzzle. The most elegant one is for // GraphBLAS to handle all this internally, and change formats as needed. 
It // could choose to store A in both CSR and CSC format, or use an entirely // different data structure, and it would make the decision between the push or // pull, at each step of the BFS. This is not a simple task since the API is // complex. Furthermore, the selection of the data structure for A has // implications on all other GraphBLAS operations (submatrix assignment and // extraction, for example). // However, if A were to be stored in both CSR and CSC format, inside the // opaque GraphBLAS GrB_Matrix data structure, then LAGraph_bfs_simple would // become a push-pull BFS. // The second solution is to allow the user application or library such as // LAGraph to provide hints and allow it to query the GraphBLAS library. // There are no such features in the GraphBLAS C API. // SuiteSparse:GraphBLAS takes the second approach: It adds two functions that // are extensions to the API: GxB_set changes the format (CSR or CSC), and // GxB_get can query the format. Even with this simplification, // SuiteSparse:GraphBLAS uses 24 different algorithmic variants inside GrB_mxm // (per semiring), and selects between them automatically. By default, all of // its matrices are stored in CSR format (either sparse or hypersparse, // selected automatically). So if no GxB_* extensions are used, all matrices // are in CSR format. // If a GraphBLAS library other than SuiteSparse:GraphBLAS is in use, this // particular function assumes that its input matrices are in CSR format, or at // least A(i,:) and AT(i,:) can be easily accessed. With this assumption, it // is the responsibility of this function to select between using a push or a // pull, for each step in the BFS. // The following analysis assumes CSR format, and it assumes that dot-product // (a pull step) can terminate early via a short-circuit rule with the OR // monoid, as soon as it encounters a TRUE value. This cuts the time for the // dot-product.
Not all GraphBLAS libraries may use this, but SuiteSparse: // GraphBLAS does (in version 2.3.0 and later). Early termination cannot be // done for the saxpy (push step) method. // The work done by the push method (saxpy) is very predictable. BFS uses a // complemented mask. There is no simple way to exploit a complemented mask, // and saxpy has no early termination rule. If the set of nodes in the current // level is q, the work is nnz(A(q,:)). If d = nnz(A)/n is the average degree, // this becomes d*nq where nq = length (q): // pushwork = d*nq // The work done by the pull (dot product) method is less predictable. It can // exploit the complemented mask, and so it only computes (n-nvisited) dot // products, if nvisited is the # of nodes visited so far (in all levels). // With no early-termination, the dot product will take d * log2 (nq) time, // assuming that q is large and a binary search is used internally. That is, // the dot product will scan through the d entries in A(i,:), and do a binary // search for each entry in q. To account for the higher constant of a binary // search, log2(nq) is replaced with (3*(1+log2(nq))). With early termination, // d is too high. If the nodes are randomly marked, the probability of each // node being marked is nvisited/n. The expected number of trials until // success, for a sequence of events with probabilty p, is 1/p. Thus, the // expected number of iterations in a dot product before an early termination // is 1/p = (n/nvisited+1), where +1 is added to avoid a divide by zero. // However, it cannot exceed d. Thus, the total work for the dot product // (pull) method can be estimated as: // per_dot = min (d, n / (nvisited+1)) // pullwork = (n-nvisited) * per_dot * (3 * (1 + log2 ((double) nq))) // The above expressions are valid for SuiteSparse:GraphBLAS v2.3.0 and later, // and may be reasonable for other GraphBLAS implementations. Push or pull // is selected as the one with the least work. 
// TODO: change the formula for v3.2.0 // The push/pull decision requires that both A and AT be passed in, but this // function can use just one or the other. If only A is passed in and AT is // NULL, then only vxm(q,A) will be used (a push step if A is CSR, or a pull // step if A is CSC). If only AT is passed in and A is NULL, then only // mxv(AT,q) will be used (a pull step if AT is CSR, or a push step if AT is // CSC). // In general, while a push-pull strategy is the fastest, a push-only BFS will // give good performance. In particular, the time to compute AT=A' plus the // time for the push-pull BFS is typically higher than just a push-only BFS. // This is why this function does not compute AT=A'. To take advantage of the // push-pull method, both A and AT must already be available, with the cost to // construct them amortized across other computations such as this one. // A pull-only strategy will be *exceedingly* slow. // The input matrix A must be square. It can be non-binary, but best // performance will be obtained if it is GrB_BOOL. It can have explicit // entries equal to zero. These are safely ignored, and are treated as // non-edges. // SuiteSparse:GraphBLAS can detect the CSR vs CSC format of its inputs. // In this case, if both matrices are provided, they must be in the same // format (both GxB_BY_ROW or both GxB_BY_COL). If the matrices are in CSC // format, vxm(q,A) is the pull step and mxv(AT,q) is the push step. // If only A or AT are provided, and the result is a pull-only algorithm, // an error is returned. // References: // Carl Yang, Aydin Buluc, and John D. Owens. 2018. Implementing Push-Pull // Efficiently in GraphBLAS. In Proceedings of the 47th International // Conference on Parallel Processing (ICPP 2018). ACM, New York, NY, USA, // Article 89, 11 pages. DOI: https://doi.org/10.1145/3225058.3225122 // Scott Beamer, Krste Asanovic and David A. Patterson, // The GAP Benchmark Suite, http://arxiv.org/abs/1508.03619, 2015.
// http://gap.cs.berkeley.edu/ #include "LAGraph_bfs_pushpull.h" #include "../configuration/config.h" #define LAGRAPH_FREE_ALL \ { \ GrB_free (&v) ; \ GrB_free (&t) ; \ GrB_free (&q) ; \ GrB_free (&pi) ; \ } #define LAGRAPH_ERROR(message,info) \ { \ fprintf (stderr, "LAGraph error: %s\n[%d]\nFile: %s Line: %d\n", \ message, info, __FILE__, __LINE__) ; \ LAGRAPH_FREE_ALL ; \ return (info) ; \ } #define LAGRAPH_MAX(x,y) (((x) > (y)) ? (x) : (y)) #define LAGRAPH_MIN(x,y) (((x) < (y)) ? (x) : (y)) GrB_Info LAGraph_bfs_pushpull // push-pull BFS, or push-only if AT = NULL ( GrB_Vector *v_output, // v(i) is the BFS level of node i in the graph GrB_Vector *pi_output, // pi(i) = p+1 if p is the parent of node i. // if NULL, the parent is not computed. GrB_Matrix A, // input graph, treated as if boolean in semiring GrB_Matrix AT, // transpose of A (optional; push-only if NULL) int64_t source, // starting node of the BFS int64_t *dest, // optional destination node of the BFS int64_t max_level, // optional limit of # levels to search bool vsparse // if true, v is expected to be very sparse ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GrB_Vector q = NULL ; // nodes visited at each level GrB_Vector v = NULL ; // result vector GrB_Vector t = NULL ; // temporary vector GrB_Vector pi = NULL ; // parent vector if(v_output == NULL || (A == NULL && AT == NULL)) { // required output argument is missing LAGRAPH_ERROR("required arguments are NULL", GrB_NULL_POINTER) ; } (*v_output) = NULL ; bool compute_tree = (pi_output != NULL) ; GrB_Index nrows, ncols, nvalA, ignore, nvals ; // A is provided. 
AT may or may not be provided GrB_Matrix_nrows(&nrows, A) ; GrB_Matrix_ncols(&ncols, A) ; GrB_Matrix_nvals(&nvalA, A) ; bool use_vxm_with_A = true ; // push/pull requires both A and AT bool push_pull = (A != NULL && AT != NULL) ; if(nrows != ncols) { // A must be square LAGRAPH_ERROR("A must be square", GrB_NULL_POINTER) ; } //-------------------------------------------------------------------------- // initializations //-------------------------------------------------------------------------- GrB_Index n = nrows ; int nthreads; Config_Option_get(Config_OPENMP_NTHREAD, &nthreads); nthreads = LAGRAPH_MIN(n / 4096, nthreads) ; nthreads = LAGRAPH_MAX(nthreads, 1) ; // just traverse from the source node max_level = (max_level <= 0) ? n : LAGRAPH_MIN(n, max_level) ; // create an empty vector v GrB_Type int_type = (n > INT32_MAX) ? GrB_INT64 : GrB_INT32 ; GrB_Vector_new(&v, int_type, n) ; // make v dense if requested int64_t vlimit = LAGRAPH_MAX(256, sqrt((double) n)) ; if(!vsparse) { // v is expected to have many entries, so convert v to dense. // If the guess is wrong, v can be made dense later on. 
GrB_assign(v, NULL, NULL, 0, GrB_ALL, n, NULL) ; } // create a scalar to hold the destination value GrB_Index dest_val ; GrB_Semiring first_semiring, second_semiring ; if(compute_tree) { // create an integer vector q, and set q(source) to source+1 GrB_Vector_new(&q, int_type, n) ; GrB_Vector_setElement(q, source + 1, source) ; if(n > INT32_MAX) { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT64 ; second_semiring = GxB_ANY_SECOND_INT64 ; } else { // terminates as soon as it finds any parent; nondeterministic first_semiring = GxB_ANY_FIRST_INT32 ; second_semiring = GxB_ANY_SECOND_INT32 ; } // create the empty parent vector GrB_Vector_new(&pi, int_type, n) ; if(!vsparse) { // make pi a dense vector of all zeros GrB_assign(pi, NULL, NULL, 0, GrB_ALL, n, NULL) ; } // pi (source) = source+1 denotes a root of the BFS tree GrB_Vector_setElement(pi, source + 1, source) ; } else { // create a boolean vector q, and set q(source) to true GrB_Vector_new(&q, GrB_BOOL, n) ; GrB_Vector_setElement(q, true, source) ; // terminates as soon as it finds any pair first_semiring = GxB_ANY_PAIR_BOOL ; second_semiring = GxB_ANY_PAIR_BOOL ; } // average node degree double d = (n == 0) ? 
0 : (((double) nvalA) / (double) n) ; int64_t nvisited = 0 ; // # nodes visited so far GrB_Index nq = 1 ; // number of nodes in the current level //-------------------------------------------------------------------------- // BFS traversal and label the nodes //-------------------------------------------------------------------------- for(int64_t level = 1 ; ; level++) { //---------------------------------------------------------------------- // set v to the current level, for all nodes in q //---------------------------------------------------------------------- // v<q> = level: set v(i) = level for all nodes i in q GrB_assign(v, q, NULL, level, GrB_ALL, n, GrB_DESC_S) ; //---------------------------------------------------------------------- // check if done //---------------------------------------------------------------------- nvisited += nq ; if(nq == 0 || nvisited == n || level >= max_level) break ; //---------------------------------------------------------------------- // check if destination has been reached, if one is provided //---------------------------------------------------------------------- if(dest) { GrB_Info res = GrB_Vector_extractElement(&dest_val, v, *dest) ; if(res != GrB_NO_VALUE) break ; } //---------------------------------------------------------------------- // check if v should be converted to dense //---------------------------------------------------------------------- if(vsparse && nvisited > vlimit) { // Convert v from sparse to dense to speed up the rest of the work. // If this case is triggered, it would have been faster to pass in // vsparse = false on input. // v <!v> = 0 GrB_assign(v, v, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ; GrB_Vector_nvals(&ignore, v) ; if(compute_tree) { // Convert pi from sparse to dense, to speed up the work. 
// pi<!pi> = 0 GrB_assign(pi, pi, NULL, 0, GrB_ALL, n, GrB_DESC_SC) ; GrB_Vector_nvals(&ignore, pi) ; } vsparse = false ; } //---------------------------------------------------------------------- // select push vs pull //---------------------------------------------------------------------- if(push_pull) { double pushwork = d * nq ; double expected = (double) n / (double)(nvisited + 1) ; double per_dot = LAGRAPH_MIN(d, expected) ; double binarysearch = (3 * (1 + log2((double) nq))) ; double pullwork = (n - nvisited) * per_dot * binarysearch ; use_vxm_with_A = (pushwork < pullwork) ; } //---------------------------------------------------------------------- // q = next level of the BFS //---------------------------------------------------------------------- if(use_vxm_with_A) { // q'<!v> = q'*A // this is a push step if A is in CSR format; pull if CSC GrB_vxm(q, v, NULL, first_semiring, q, A, GrB_DESC_RC) ; } else { // q<!v> = AT*q // this is a pull step if AT is in CSR format; push if CSC GrB_mxv(q, v, NULL, second_semiring, AT, q, GrB_DESC_RC) ; } //---------------------------------------------------------------------- // move to next level //---------------------------------------------------------------------- if(compute_tree) { //------------------------------------------------------------------ // assign parents //------------------------------------------------------------------ // q(i) currently contains the parent of node i in tree (off by one // so it won't have any zero values, for valued mask). // pi<q> = q GrB_assign(pi, q, NULL, q, GrB_ALL, n, GrB_DESC_S) ; //------------------------------------------------------------------ // replace q with current node numbers //------------------------------------------------------------------ // TODO this could be a unaryop // q(i) = i+1 for all entries in q. 
GrB_Index *qi ; bool iso ; bool jumbled ; int64_t q_size ; GrB_Index qi_size, qx_size ; if(n > INT32_MAX) { int64_t *qx ; GxB_Vector_export_CSC(&q, &int_type, &n, &qi, (void **) (&qx), &qi_size, &qx_size, &iso, &nq, &jumbled, NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int64_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import_CSC(&q, int_type, n, &qi, (void **) (&qx), qi_size, qx_size, false, nq, jumbled, NULL) ; } else { int32_t *qx ; GxB_Vector_export_CSC(&q, &int_type, &n, &qi, (void **) (&qx), &qi_size, &qx_size, &iso, &nq, &jumbled, NULL) ; int nth = LAGRAPH_MIN(nq / (64 * 1024), nthreads) ; nth = LAGRAPH_MAX(nth, 1) ; #pragma omp parallel for num_threads(nth) schedule(static) for(int32_t k = 0 ; k < nq ; k++) { qx [k] = qi [k] + 1 ; } GxB_Vector_import_CSC(&q, int_type, n, &qi, (void **) (&qx), qi_size, qx_size, false, nq, jumbled, NULL) ; } } else { //------------------------------------------------------------------ // count the nodes in the current level //------------------------------------------------------------------ GrB_Vector_nvals(&nq, q) ; } } //-------------------------------------------------------------------------- // return the parent vector, if computed //-------------------------------------------------------------------------- if(compute_tree) { (*pi_output) = pi ; pi = NULL ; } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- (*v_output) = v ; // return result v = NULL ; // set to NULL so LAGRAPH_FREE_ALL doesn't free it LAGRAPH_FREE_ALL ; // free all workspace (except for result v) return (GrB_SUCCESS) ; }
GB_unaryop__abs_uint64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_uint8
// op(A') function:  GB_tran__abs_uint64_uint8

// C type:   uint64_t
// A type:   uint8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: ABS of an unsigned value is the identity, so the
// generated operator reduces to a plain copy.
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise kernel: Cx [p] = (uint64_t) Ax [p] for p in [0,anz).
// Returns GrB_NO_VALUE when this specialization is compiled out
// (GB_DISABLE), telling the caller to fall back to the generic kernel.
GrB_Info GB_unop__abs_uint64_uint8
(
    uint64_t *restrict Cx,      // output array, length anz
    const uint8_t *restrict Ax, // input array, length anz
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose variant: the loop body lives in the shared template
// GB_unaryop_transpose.c, specialized via the GB_* macros defined above.
GrB_Info GB_tran__abs_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,       // per-slice row counts (from phase 1)
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
core_zttmqr.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

/***************************************************************************//**
 *
 * @ingroup core_ttmqr
 *
 *  Overwrites the general m1-by-n1 tile A1 and
 *  m2-by-n2 tile A2 with
 *
 *                                side = PlasmaLeft        side = PlasmaRight
 *    trans = PlasmaNoTrans            Q * | A1 |           | A1 A2 | * Q
 *                                         | A2 |
 *
 *    trans = Plasma_ConjTrans      Q^H * | A1 |           | A1 A2 | * Q^H
 *                                        | A2 |
 *
 *  where Q is a complex unitary matrix defined as the product of k
 *  elementary reflectors
 *
 *    Q = H(1) H(2) . . . H(k)
 *
 *  as returned by plasma_core_zttqrt.
 *
 *******************************************************************************
 *
 * @param[in] side
 *         - PlasmaLeft  : apply Q or Q^H from the Left;
 *         - PlasmaRight : apply Q or Q^H from the Right.
 *
 * @param[in] trans
 *         - PlasmaNoTrans    : Apply Q;
 *         - Plasma_ConjTrans : Apply Q^H.
 *
 * @param[in] m1
 *         The number of rows of the tile A1. m1 >= 0.
 *
 * @param[in] n1
 *         The number of columns of the tile A1. n1 >= 0.
 *
 * @param[in] m2
 *         The number of rows of the tile A2. m2 >= 0.
 *         m2 = m1 if side == PlasmaRight.
 *
 * @param[in] n2
 *         The number of columns of the tile A2. n2 >= 0.
 *         n2 = n1 if side == PlasmaLeft.
 *
 * @param[in] k
 *         The number of elementary reflectors whose product defines
 *         the matrix Q.
 *
 * @param[in] ib
 *         The inner-blocking size. ib >= 0.
 *
 * @param[in,out] A1
 *         On entry, the m1-by-n1 tile A1.
 *         On exit, A1 is overwritten by the application of Q.
 *
 * @param[in] lda1
 *         The leading dimension of the array A1. lda1 >= max(1,m1).
 *
 * @param[in,out] A2
 *         On entry, the m2-by-n2 tile A2.
 *         On exit, A2 is overwritten by the application of Q.
 *
 * @param[in] lda2
 *         The leading dimension of the tile A2. lda2 >= max(1,m2).
 *
 * @param[in] V
 *         The i-th row must contain the vector which defines the
 *         elementary reflector H(i), for i = 1,2,...,k, as returned by
 *         plasma_core_zttqrt in the first k columns of its array argument V.
 *
 * @param[in] ldv
 *         The leading dimension of the array V. ldv >= max(1,k).
 *
 * @param[in] T
 *         The ib-by-k triangular factor T of the block reflector.
 *         T is upper triangular by block (economic storage);
 *         The rest of the array is not referenced.
 *
 * @param[in] ldt
 *         The leading dimension of the array T. ldt >= ib.
 *
 * @param work
 *         Auxiliary workspace array of length
 *         ldwork-by-n1 if side == PlasmaLeft
 *         ldwork-by-ib if side == PlasmaRight
 *
 * @param[in] ldwork
 *         The leading dimension of the array work.
 *             ldwork >= max(1,ib) if side == PlasmaLeft
 *             ldwork >= max(1,m1) if side == PlasmaRight
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
__attribute__((weak))
int plasma_core_zttmqr(plasma_enum_t side, plasma_enum_t trans,
                       int m1, int n1, int m2, int n2, int k, int ib,
                             plasma_complex64_t *A1, int lda1,
                             plasma_complex64_t *A2, int lda2,
                       const plasma_complex64_t *V, int ldv,
                       const plasma_complex64_t *T, int ldt,
                       plasma_complex64_t *work, int ldwork)
{
    // Check input arguments.
    // Each failed check returns the negated 1-based index of the offending
    // argument, matching LAPACK's error-reporting convention.
    if ((side != PlasmaLeft) && (side != PlasmaRight)) {
        plasma_coreblas_error("illegal value of side");
        return -1;
    }
    if ((trans != PlasmaNoTrans) && (trans != Plasma_ConjTrans)) {
        plasma_coreblas_error("illegal value of trans");
        return -2;
    }
    if (m1 < 0) {
        plasma_coreblas_error("illegal value of m1");
        return -3;
    }
    if (n1 < 0) {
        plasma_coreblas_error("illegal value of n1");
        return -4;
    }
    if ((m2 < 0) || ((m2 != m1) && (side == PlasmaRight))) {
        plasma_coreblas_error("illegal value of m2");
        return -5;
    }
    if ((n2 < 0) || ((n2 != n1) && (side == PlasmaLeft))) {
        plasma_coreblas_error("illegal value of n2");
        return -6;
    }
    if ((k < 0) ||
        ((side == PlasmaLeft) && (k > m1)) ||
        ((side == PlasmaRight) && (k > n1))) {
        plasma_coreblas_error("illegal value of k");
        return -7;
    }
    if (ib < 0) {
        plasma_coreblas_error("illegal value of ib");
        return -8;
    }
    if (A1 == NULL) {
        plasma_coreblas_error("NULL A1");
        return -9;
    }
    if (lda1 < imax(1, m1)) {
        plasma_coreblas_error("illegal value of lda1");
        return -10;
    }
    if (A2 == NULL) {
        plasma_coreblas_error("NULL A2");
        return -11;
    }
    if (lda2 < imax(1, m2)) {
        plasma_coreblas_error("illegal value of lda2");
        return -12;
    }
    if (V == NULL) {
        plasma_coreblas_error("NULL V");
        return -13;
    }
    if (ldv < imax(1, side == PlasmaLeft ? m2 : n2)) {
        plasma_coreblas_error("illegal value of ldv");
        return -14;
    }
    if (T == NULL) {
        plasma_coreblas_error("NULL T");
        return -15;
    }
    if (ldt < imax(1,ib)) {
        plasma_coreblas_error("illegal value of ldt");
        return -16;
    }
    if (work == NULL) {
        plasma_coreblas_error("NULL work");
        return -17;
    }
    if (ldwork < imax(1, side == PlasmaLeft ? ib : m1)) {
        plasma_coreblas_error("Illegal value of ldwork");
        return -18;
    }

    // quick return
    if (m1 == 0 || n1 == 0 || m2 == 0 || n2 == 0 || k == 0 || ib == 0)
        return PlasmaSuccess;

    // Choose the sweep direction over the k reflectors, ib at a time:
    // (Left, Q^H) and (Right, Q) apply the blocks forward (i1=0, step +ib);
    // the other two cases apply them backward from the last full block.
    int i1, i3;
    if ((side == PlasmaLeft && trans != PlasmaNoTrans) ||
        (side == PlasmaRight && trans == PlasmaNoTrans)) {
        i1 = 0;
        i3 = ib;
    }
    else {
        i1 = ((k-1)/ib)*ib;
        i3 = -ib;
    }

    for (int i = i1; i > -1 && i < k; i += i3) {
        int kb = imin(ib, k-i);     // width of this reflector block
        int ic = 0;                 // row offset into A1
        int jc = 0;                 // column offset into A1
        int mi  = m1;
        int ni  = n1;
        int mi2 = m2;
        int ni2 = n2;
        int l   = 0;                // size of the triangular overlap part
        if (side == PlasmaLeft) {
            // H or H^H is applied to C(i:m,1:n).
            mi = kb; //m1 - i;
            mi2 = imin(i+kb, m2);
            ic = i;
            l = imin(kb, imax(0, m2-i));
        }
        else {
            // H or H^H is applied to C(1:m,i:n).
            ni = kb;
            ni2 = imin(i+kb, n2);
            jc = i;
            l = imin(kb, imax(0, n2-i));
        }

        // Apply H or H^H (NOTE: plasma_core_zparfb used to be core_zttrfb).
        plasma_core_zparfb(side, trans,
                           PlasmaForward, PlasmaColumnwise,
                           mi, ni, mi2, ni2, kb, l,
                           &A1[lda1*jc+ic], lda1,
                           A2, lda2,
                           &V[ldv*i], ldv,
                           &T[ldt*i], ldt,
                           work, ldwork);
    }
    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper: schedules plasma_core_zttmqr as a task with data
// dependences on the tiles it reads (V, T) and updates (A1, A2), using the
// per-thread workspace from the plasma_workspace_t pool.
void plasma_core_omp_zttmqr(plasma_enum_t side, plasma_enum_t trans,
                            int m1, int n1, int m2, int n2, int k, int ib,
                                  plasma_complex64_t *A1, int lda1,
                                  plasma_complex64_t *A2, int lda2,
                            const plasma_complex64_t *V, int ldv,
                            const plasma_complex64_t *T, int ldt,
                            plasma_workspace_t work,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*n1]) \
                     depend(inout:A2[0:lda2*n2]) \
                     depend(in:V[0:ldv*k]) \
                     depend(in:T[0:ib*k])
    {
        // Skip the kernel entirely if a previous task in this sequence
        // has already failed.
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            int tid = omp_get_thread_num();
            plasma_complex64_t *W = (plasma_complex64_t*)work.spaces[tid];
            int ldwork = side == PlasmaLeft ? ib : m1;  // TODO: double check

            // Call the kernel.
            int info = plasma_core_zttmqr(side, trans,
                                          m1, n1, m2, n2, k, ib,
                                          A1, lda1,
                                          A2, lda2,
                                          V, ldv,
                                          T, ldt,
                                          W, ldwork);

            if (info != PlasmaSuccess) {
                plasma_error("core_zttmqr() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
OmpForTermLink.c
// NOTE(review): judging by the filename (OmpForTermLink) and the content,
// this appears to be a compiler/regression test exercising control-flow
// termination and linking of OpenMP worksharing loops — confirm against the
// test suite it belongs to. The bodies are intentionally trivial; do not
// "clean up" the bare continue or the dead store, as they are the point.
int main() {
  int i;

  // Worksharing loop with an empty body.
#pragma omp for
  for (i = 0; i < 10; i++) {
  }

  // Worksharing loop whose body is a single unconditional 'continue'.
#pragma omp for
  for (i = 0; i < 10; i++) {
    continue;
  }

  // 'continue' reached through a conditional branch.
#pragma omp for
  for (i = 0; i < 10; i++) {
    if (1) {
      continue;
    }
  }

  // Straight-line body with a block-local variable and assignment.
#pragma omp for
  for (i = 0; i < 10; i++) {
    int x;
    x = 10;
  }
}
GB_concat_hyper.c
//------------------------------------------------------------------------------
// GB_concat_hyper: concatenate an array of matrices into a hypersparse matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Frees all workspace and the (partially built) output matrix; used on the
// error paths below and referenced by GB_OK.
#define GB_FREE_ALL         \
{                           \
    GB_FREE (&Wi, Wi_size) ;        \
    GB_FREE_WORK (&Wj, Wj_size) ;   \
    GB_FREE_WORK (&Wx, Wx_size) ;   \
    GB_phbix_free (C) ;             \
}

#include "GB_concat.h"

// Concatenates the m-by-n grid of Tiles into C by extracting every tile's
// entries as (i,j,x) triplets into the workspace Wi/Wj/Wx, offsetting the
// indices by each tile's position, and rebuilding C with GB_builder.
GrB_Info GB_concat_hyper            // concatenate into a hypersparse matrix
(
    GrB_Matrix C,                   // input/output matrix for results
    const bool C_iso,               // if true, construct C as iso
    const GB_void *cscalar,         // iso value of C, if C is iso
    const int64_t cnz,              // # of entries in C
    const GrB_Matrix *Tiles,        // 2D row-major array of size m-by-n,
    const GrB_Index m,
    const GrB_Index n,
    const int64_t *restrict Tile_rows,  // size m+1
    const int64_t *restrict Tile_cols,  // size n+1
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // allocate triplet workspace to construct C as hypersparse
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = NULL ;
    ASSERT_MATRIX_OK (C, "C input to concat hyper", GB0) ;

    int64_t *restrict Wi = NULL ; size_t Wi_size = 0 ;
    int64_t *restrict Wj = NULL ; size_t Wj_size = 0 ;
    GB_void *restrict Wx = NULL ; size_t Wx_size = 0 ;

    GrB_Type ctype = C->type ;
    int64_t cvlen = C->vlen ;
    int64_t cvdim = C->vdim ;
    bool csc = C->is_csc ;
    size_t csize = ctype->size ;
    GB_Type_code ccode = ctype->code ;

    // save C's settings before freeing its contents, so they can be
    // restored on the rebuilt matrix after GB_builder
    float hyper_switch = C->hyper_switch ;
    float bitmap_switch = C->bitmap_switch ;
    int sparsity_control = C->sparsity_control ;
    bool static_header = C->static_header ;

    GB_phbix_free (C) ;

    // Wi is allocated with GB_MALLOC (not _WORK) because GB_builder may
    // transplant it directly into C as C->i.
    Wi = GB_MALLOC (cnz, int64_t, &Wi_size) ;       // becomes C->i
    Wj = GB_MALLOC_WORK (cnz, int64_t, &Wj_size) ;  // freed below
    if (!C_iso)
    {
        Wx = GB_MALLOC_WORK (cnz * csize, GB_void, &Wx_size) ;  // freed below
    }
    if (Wi == NULL || Wj == NULL || (!C_iso && Wx == NULL))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    // traverse the tiles in C's storage order: vectors (outer) are columns
    // when C is CSC, rows when CSR
    int64_t nouter = csc ? n : m ;
    int64_t ninner = csc ? m : n ;

    //--------------------------------------------------------------------------
    // concatenate all matrices into the list of triplets
    //--------------------------------------------------------------------------

    int64_t pC = 0 ;    // running offset into Wi/Wj/Wx
    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A
            //------------------------------------------------------------------

            A = csc ? GB_TILE (Tiles, inner, outer)
                    : GB_TILE (Tiles, outer, inner) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;

            //------------------------------------------------------------------
            // determine where to place the tile in C
            //------------------------------------------------------------------

            // The tile A appears in vectors cvstart:cvend-1 of C, and indices
            // cistart:ciend-1.

            int64_t cvstart, cistart ;
            if (csc)
            {
                // C is held by column
                // Tiles is row-major and accessed in column order
                cvstart = Tile_cols [outer] ;
                cistart = Tile_rows [inner] ;
            }
            else
            {
                // C is held by row
                // Tiles is row-major and accessed in row order
                cvstart = Tile_rows [outer] ;
                cistart = Tile_cols [inner] ;
            }

            //------------------------------------------------------------------
            // extract the tuples from tile A
            //------------------------------------------------------------------

            // if A is iso but C is not, extractTuples expands A->x [0] into
            // all Wx [...].  If both A and C are iso, then all tiles are iso,
            // and Wx is not extracted.

            int64_t anz = GB_nnz (A) ;
            // Wi always receives C's indices and Wj its vector numbers, so
            // the (I,J) destinations swap depending on C's orientation.
            GB_OK (GB_extractTuples (
                (GrB_Index *) ((csc ? Wi : Wj) + pC),
                (GrB_Index *) ((csc ? Wj : Wi) + pC),
                (C_iso) ? NULL : (Wx + pC * csize),
                (GrB_Index *) (&anz), ccode, A, Context)) ;

            //------------------------------------------------------------------
            // adjust the indices to reflect their new place in C
            //------------------------------------------------------------------

            // three variants so each parallel loop only does the additions
            // actually needed for this tile's position
            int nth = GB_nthreads (anz, chunk, nthreads_max) ;
            if (cistart > 0 && cvstart > 0)
            {
                int64_t pA ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (pA = 0 ; pA < anz ; pA++)
                {
                    Wi [pC + pA] += cistart ;
                    Wj [pC + pA] += cvstart ;
                }
            }
            else if (cistart > 0)
            {
                int64_t pA ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (pA = 0 ; pA < anz ; pA++)
                {
                    Wi [pC + pA] += cistart ;
                }
            }
            else if (cvstart > 0)
            {
                int64_t pA ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (pA = 0 ; pA < anz ; pA++)
                {
                    Wj [pC + pA] += cvstart ;
                }
            }

            //------------------------------------------------------------------
            // advance the tuple counter
            //------------------------------------------------------------------

            pC += anz ;
        }
    }

    //--------------------------------------------------------------------------
    // build C from the triplets
    //--------------------------------------------------------------------------

    const GB_void *S_input = NULL ;
    if (C_iso)
    {
        S_input = cscalar ;
    }

    // GB_builder takes ownership of the workspace: Wi is transplanted into C
    // (or freed on error), Wj and Wx are freed; all three are set to NULL.
    GB_OK (GB_builder (
        C,                      // create C using a static or dynamic header
        ctype,                  // C->type
        cvlen,                  // C->vlen
        cvdim,                  // C->vdim
        csc,                    // C->is_csc
        (int64_t **) &Wi,       // Wi is C->i on output, or freed on error
        &Wi_size,
        (int64_t **) &Wj,       // Wj, free on output
        &Wj_size,
        (GB_void **) &Wx,       // Wx, free on output; or NULL if C is iso
        &Wx_size,
        false,                  // tuples need to be sorted
        true,                   // no duplicates
        cnz,                    // size of Wi and Wj in # of tuples
        true,                   // is_matrix: unused
        NULL, NULL,             // original I,J tuples
        S_input,                // cscalar if C is iso, or NULL
        C_iso,                  // true if C is iso
        cnz,                    // # of tuples
        NULL,                   // no duplicates, so dup is NULL
        ctype,                  // the type of Wx (no typecasting)
        Context
    )) ;

    // restore the settings saved before the rebuild
    C->hyper_switch = hyper_switch ;
    C->bitmap_switch = bitmap_switch ;
    C->sparsity_control = sparsity_control ;
    ASSERT (C->static_header == static_header) ;
    ASSERT (GB_IS_HYPERSPARSE (C)) ;
    ASSERT_MATRIX_OK (C, "C from concat hyper", GB0) ;

    // workspace has been freed by GB_builder, or transplanted into C
    ASSERT (Wi == NULL) ;
    ASSERT (Wj == NULL) ;
    ASSERT (Wx == NULL) ;
    return (GrB_SUCCESS) ;
}
GraphReconstructor.h
// // Copyright (C) 2015-2020 Yahoo Japan Corporation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #pragma once #include <unordered_map> #include <unordered_set> #include <list> #ifdef _OPENMP #include <omp.h> #else #warning "*** OMP is *NOT* available! ***" #endif namespace NGT { class GraphReconstructor { public: static void extractGraph(std::vector<NGT::ObjectDistances> &graph, NGT::Index &index) { NGT::GraphIndex &graphIndex = static_cast<NGT::GraphIndex&>(index.getIndex()); graph.reserve(graphIndex.repository.size()); for (size_t id = 1; id < graphIndex.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "GraphReconstructor::extractGraph: Processed " << id << " objects." << std::endl; } try { NGT::GraphNode &node = *graphIndex.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::ObjectDistances nd; nd.reserve(node.size()); for (auto n = node.begin(graphIndex.repository.allocator); n != node.end(graphIndex.repository.allocator); ++n) { nd.push_back(ObjectDistance((*n).id, (*n).distance)); } graph.push_back(nd); #else graph.push_back(node); #endif if (graph.back().size() != graph.back().capacity()) { std::cerr << "GraphReconstructor::extractGraph: Warning! The graph size must be the same as the capacity. " << id << std::endl; } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor::extractGraph: Warning! Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } } static void adjustPaths(NGT::Index &outIndex) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "construct index is not implemented." << std::endl; exit(1); #else NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); size_t rStartRank = 0; std::list<std::pair<size_t, NGT::GraphNode> > tmpGraph; for (size_t id = 1; id < outGraph.repository.size(); id++) { NGT::GraphNode &node = *outGraph.getNode(id); tmpGraph.push_back(std::pair<size_t, NGT::GraphNode>(id, node)); if (node.size() > rStartRank) { node.resize(rStartRank); } } size_t removeCount = 0; for (size_t rank = rStartRank; ; rank++) { bool edge = false; Timer timer; for (auto it = tmpGraph.begin(); it != tmpGraph.end();) { size_t id = (*it).first; try { NGT::GraphNode &node = (*it).second; if (rank >= node.size()) { it = tmpGraph.erase(it); continue; } edge = true; if (rank >= 1 && node[rank - 1].distance > node[rank].distance) { std::cerr << "distance order is wrong!" 
<< std::endl; std::cerr << id << ":" << rank << ":" << node[rank - 1].id << ":" << node[rank].id << std::endl; } NGT::GraphNode &tn = *outGraph.getNode(id); ////////////////// volatile bool found = false; if (rank < 1000) { for (size_t tni = 0; tni < tn.size() && !found; tni++) { if (tn[tni].id == node[rank].id) { continue; } NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id); for (size_t dni = 0; dni < dstNode.size(); dni++) { if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) { found = true; break; } } } } else { #ifdef _OPENMP #pragma omp parallel for num_threads(10) #endif for (size_t tni = 0; tni < tn.size(); tni++) { if (found) { continue; } if (tn[tni].id == node[rank].id) { continue; } NGT::GraphNode &dstNode = *outGraph.getNode(tn[tni].id); for (size_t dni = 0; dni < dstNode.size(); dni++) { if ((dstNode[dni].id == node[rank].id) && (dstNode[dni].distance < node[rank].distance)) { found = true; } } } } if (!found) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) outGraph.addEdge(id, node.at(i, outGraph.repository.allocator).id, node.at(i, outGraph.repository.allocator).distance, true); #else tn.push_back(NGT::ObjectDistance(node[rank].id, node[rank].distance)); #endif } else { removeCount++; } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; it++; continue; } it++; } if (edge == false) { break; } } #endif // NGT_SHARED_MEMORY_ALLOCATOR } static void adjustPathsEffectively(NGT::Index &outIndex) { NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); adjustPathsEffectively(outGraph); } static void adjustPathsEffectively(NGT::GraphIndex &outGraph) { Timer timer; timer.start(); size_t rStartRank = 0; std::vector<std::pair<size_t, NGT::GraphNode> > tmpGraph; for (size_t id = 1; id < outGraph.repository.size(); id++) { NGT::GraphNode &node = *outGraph.getNode(id); tmpGraph.push_back(std::pair<size_t, NGT::GraphNode>(id, node)); if (node.size() > rStartRank) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.resize(rStartRank, outGraph.repository.allocator); #else node.resize(rStartRank); #endif } } timer.stop(); std::cerr << "GraphReconstructor::adjustPaths: graph preparing time=" << timer << std::endl; timer.reset(); timer.start(); std::vector<std::vector<std::pair<uint32_t, uint32_t> > > removeCandidates(tmpGraph.size()); int removeCandidateCount = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (size_t idx = 0; idx < tmpGraph.size(); ++idx) { auto it = tmpGraph.begin() + idx; size_t id = (*it).first; try { NGT::GraphNode &srcNode = (*it).second; std::unordered_map<uint32_t, std::pair<size_t, double> > neighbors; for (size_t sni = 0; sni < srcNode.size(); ++sni) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) neighbors[srcNode.at(sni, outGraph.repository.allocator).id] = std::pair<size_t, double>(sni, srcNode.at(sni, outGraph.repository.allocator).distance); #else neighbors[srcNode[sni].id] = std::pair<size_t, double>(sni, srcNode[sni].distance); #endif } std::vector<std::pair<int, std::pair<uint32_t, uint32_t> > > candidates; for (size_t sni = 0; sni < srcNode.size(); sni++) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) assert(srcNode.at(sni, outGraph.repository.allocator).id == tmpGraph[srcNode.at(sni, outGraph.repository.allocator).id - 
1].first); NGT::GraphNode &pathNode = tmpGraph[srcNode.at(sni, outGraph.repository.allocator).id - 1].second; #else assert(srcNode[sni].id == tmpGraph[srcNode[sni].id - 1].first); NGT::GraphNode &pathNode = tmpGraph[srcNode[sni].id - 1].second; #endif for (size_t pni = 0; pni < pathNode.size(); pni++) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) auto dstNodeID = pathNode.at(pni, outGraph.repository.allocator).id; #else auto dstNodeID = pathNode[pni].id; #endif auto dstNode = neighbors.find(dstNodeID); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) if (dstNode != neighbors.end() && srcNode.at(sni, outGraph.repository.allocator).distance < (*dstNode).second.second && pathNode.at(pni, outGraph.repository.allocator).distance < (*dstNode).second.second ) { #else if (dstNode != neighbors.end() && srcNode[sni].distance < (*dstNode).second.second && pathNode[pni].distance < (*dstNode).second.second ) { #endif #if defined(NGT_SHARED_MEMORY_ALLOCATOR) candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode.at(sni, outGraph.repository.allocator).id, dstNodeID))); #else candidates.push_back(std::pair<int, std::pair<uint32_t, uint32_t> >((*dstNode).second.first, std::pair<uint32_t, uint32_t>(srcNode[sni].id, dstNodeID))); #endif removeCandidateCount++; } } } sort(candidates.begin(), candidates.end(), std::greater<std::pair<int, std::pair<uint32_t, uint32_t>>>()); for (size_t i = 0; i < candidates.size(); i++) { removeCandidates[idx].push_back(candidates[i].second); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } timer.stop(); std::cerr << "GraphReconstructor::adjustPaths extracting removed edge candidates time=" << timer << std::endl; timer.reset(); timer.start(); std::list<size_t> ids; for (auto it = tmpGraph.begin(); it != tmpGraph.end(); ++it) { size_t id = (*it).first; ids.push_back(id); } int removeCount = 0; removeCandidateCount = 0; std::vector<std::unordered_set<uint32_t> > edges(tmpGraph.size()); for (size_t rank = 0; ids.size() != 0; rank++) { for (auto it = ids.begin(); it != ids.end(); ) { size_t id = *it; size_t idx = id - 1; try { NGT::GraphNode &srcNode = tmpGraph[idx].second; if (rank >= srcNode.size()) { if (!removeCandidates[idx].empty()) { std::cerr << "Something wrong! ID=" << id << " # of remaining candidates=" << removeCandidates[idx].size() << std::endl; abort(); } it = ids.erase(it); continue; } if (removeCandidates[idx].size() > 0) { removeCandidateCount++; bool pathExist = false; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) { #else while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) { #endif size_t path = removeCandidates[idx].back().first; size_t dst = removeCandidates[idx].back().second; removeCandidates[idx].pop_back(); if ((edges[idx].find(path) != edges[idx].end()) && (edges[path - 1].find(dst) != edges[path - 1].end())) { pathExist = true; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode.at(rank, outGraph.repository.allocator).id)) { #else while (!removeCandidates[idx].empty() && (removeCandidates[idx].back().second == srcNode[rank].id)) { #endif removeCandidates[idx].pop_back(); } break; } } if (pathExist) { removeCount++; it++; continue; } } #if defined(NGT_SHARED_MEMORY_ALLOCATOR) edges[idx].insert(srcNode.at(rank, 
outGraph.repository.allocator).id); #else edges[idx].insert(srcNode[rank].id); #endif NGT::GraphNode &outSrcNode = *outGraph.getNode(id); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) outSrcNode.push_back(NGT::ObjectDistance(srcNode.at(rank, outGraph.repository.allocator).id, srcNode.at(rank, outGraph.repository.allocator).distance), outGraph.repository.allocator); #else size_t r = outSrcNode.capacity(); size_t s = outSrcNode.size(); outSrcNode.push_back(NGT::ObjectDistance(srcNode[rank].id, srcNode[rank].distance)); if (r != outSrcNode.capacity()) { std::cerr << id << "-" << rank << " " << s << ":" << r << ":" << outSrcNode.capacity() << std::endl; } #endif } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; it++; continue; } it++; } } } static void convertToANNG(std::vector<NGT::ObjectDistances> &graph) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "convertToANNG is not implemented for shared memory." << std::endl; return; #else std::cerr << "convertToANNG begin" << std::endl; for (size_t idx = 0; idx < graph.size(); idx++) { NGT::GraphNode &node = graph[idx]; for (auto ni = node.begin(); ni != node.end(); ++ni) { graph[(*ni).id - 1].push_back(NGT::ObjectDistance(idx + 1, (*ni).distance)); } } for (size_t idx = 0; idx < graph.size(); idx++) { NGT::GraphNode &node = graph[idx]; if (node.size() == 0) { continue; } std::sort(node.begin(), node.end()); NGT::ObjectID prev = 0; for (auto it = node.begin(); it != node.end();) { if (prev == (*it).id) { it = node.erase(it); continue; } prev = (*it).id; it++; } NGT::GraphNode tmp = node; node.swap(tmp); } std::cerr << "convertToANNG end" << std::endl; #endif } static void reconstructGraph(std::vector<NGT::ObjectDistances> &graph, NGT::Index &outIndex, size_t originalEdgeSize, size_t reverseEdgeSize) { if (reverseEdgeSize > 10000) { std::cerr << "something wrong. 
Edge size=" << reverseEdgeSize << std::endl; exit(1); } NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer; originalEdgeTimer.start(); NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &node = *outGraph.getNode(id); if (originalEdgeSize == 0) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.clear(outGraph.repository.allocator); #else NGT::GraphNode empty; node.swap(empty); #endif } else { NGT::ObjectDistances n = graph[id - 1]; if (n.size() < originalEdgeSize) { std::cerr << "node size is too few." << std::endl; std::cerr << n.size() << ":" << originalEdgeSize << std::endl; continue; } n.resize(originalEdgeSize); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) node.copy(n, outGraph.repository.allocator); #else node.swap(n); #endif } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } originalEdgeTimer.stop(); reverseEdgeTimer.start(); int insufficientNodeCount = 0; for (size_t id = 1; id <= graph.size(); ++id) { try { NGT::ObjectDistances &node = graph[id - 1]; size_t rsize = reverseEdgeSize; if (rsize > node.size()) { insufficientNodeCount++; rsize = node.size(); } for (size_t i = 0; i < rsize; ++i) { NGT::Distance distance = node[i].distance; size_t nodeID = node[i].id; try { NGT::GraphNode &n = *outGraph.getNode(nodeID); #if defined(NGT_SHARED_MEMORY_ALLOCATOR) n.push_back(NGT::ObjectDistance(id, distance), outGraph.repository.allocator); #else n.push_back(NGT::ObjectDistance(id, distance)); #endif } catch(...) {} } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } reverseEdgeTimer.stop(); if (insufficientNodeCount != 0) { std::cerr << "# of the nodes edges of which are in short = " << insufficientNodeCount << std::endl; } normalizeEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &n = *outGraph.getNode(id); if (id % 100000 == 0) { std::cerr << "Processed " << id << " nodes" << std::endl; } #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::sort(n.begin(outGraph.repository.allocator), n.end(outGraph.repository.allocator)); #else std::sort(n.begin(), n.end()); #endif NGT::ObjectID prev = 0; #if defined(NGT_SHARED_MEMORY_ALLOCATOR) for (auto it = n.begin(outGraph.repository.allocator); it != n.end(outGraph.repository.allocator);) { #else for (auto it = n.begin(); it != n.end();) { #endif if (prev == (*it).id) { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) it = n.erase(it, outGraph.repository.allocator); #else it = n.erase(it); #endif continue; } prev = (*it).id; it++; } #if !defined(NGT_SHARED_MEMORY_ALLOCATOR) NGT::GraphNode tmp = n; n.swap(tmp); #endif } catch (...) { std::cerr << "Graph::construct: error. something wrong. ID=" << id << std::endl; } } normalizeEdgeTimer.stop(); std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl; std::cerr << "original edge size=" << originalEdgeSize << std::endl; std::cerr << "reverse edge size=" << reverseEdgeSize << std::endl; } static void reconstructGraphWithConstraint(std::vector<NGT::ObjectDistances> &graph, NGT::Index &outIndex, size_t originalEdgeSize, size_t reverseEdgeSize, char mode = 'a') { #if defined(NGT_SHARED_MEMORY_ALLOCATOR) std::cerr << "reconstructGraphWithConstraint is not implemented." << std::endl; abort(); #else NGT::Timer originalEdgeTimer, reverseEdgeTimer, normalizeEdgeTimer; if (reverseEdgeSize > 10000) { std::cerr << "something wrong. 
Edge size=" << reverseEdgeSize << std::endl; exit(1); } NGT::GraphIndex &outGraph = dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex()); for (size_t id = 1; id < outGraph.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "Processed " << id << std::endl; } try { NGT::GraphNode &node = *outGraph.getNode(id); if (node.size() == 0) { continue; } node.clear(); NGT::GraphNode empty; node.swap(empty); } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } NGT::GraphIndex::showStatisticsOfGraph(dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex())); std::vector<ObjectDistances> reverse(graph.size() + 1); for (size_t id = 1; id <= graph.size(); ++id) { try { NGT::GraphNode &node = graph[id - 1]; if (id % 100000 == 0) { std::cerr << "Processed (summing up) " << id << std::endl; } for (size_t rank = 0; rank < node.size(); rank++) { reverse[node[rank].id].push_back(ObjectDistance(id, node[rank].distance)); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. 
ID=" << id << ":" << err.what() << std::endl; continue; } } std::vector<std::pair<size_t, size_t> > reverseSize(graph.size() + 1); reverseSize[0] = std::pair<size_t, size_t>(0, 0); for (size_t rid = 1; rid <= graph.size(); ++rid) { reverseSize[rid] = std::pair<size_t, size_t>(reverse[rid].size(), rid); } std::sort(reverseSize.begin(), reverseSize.end()); std::vector<uint32_t> indegreeCount(graph.size(), 0); size_t zeroCount = 0; for (size_t sizerank = 0; sizerank <= reverseSize.size(); sizerank++) { if (reverseSize[sizerank].first == 0) { zeroCount++; continue; } size_t rid = reverseSize[sizerank].second; ObjectDistances &rnode = reverse[rid]; for (auto rni = rnode.begin(); rni != rnode.end(); ++rni) { if (indegreeCount[(*rni).id] >= reverseEdgeSize) { continue; } NGT::GraphNode &node = *outGraph.getNode(rid); if (indegreeCount[(*rni).id] > 0 && node.size() >= originalEdgeSize) { continue; } node.push_back(NGT::ObjectDistance((*rni).id, (*rni).distance)); indegreeCount[(*rni).id]++; } } reverseEdgeTimer.stop(); std::cerr << "The number of nodes with zero outdegree by reverse edges=" << zeroCount << std::endl; NGT::GraphIndex::showStatisticsOfGraph(dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex())); normalizeEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { try { NGT::GraphNode &n = *outGraph.getNode(id); if (id % 100000 == 0) { std::cerr << "Processed " << id << std::endl; } std::sort(n.begin(), n.end()); NGT::ObjectID prev = 0; for (auto it = n.begin(); it != n.end();) { if (prev == (*it).id) { it = n.erase(it); continue; } prev = (*it).id; it++; } NGT::GraphNode tmp = n; n.swap(tmp); } catch (...) { std::cerr << "Graph::construct: error. something wrong. 
ID=" << id << std::endl; } } normalizeEdgeTimer.stop(); NGT::GraphIndex::showStatisticsOfGraph(dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex())); originalEdgeTimer.start(); for (size_t id = 1; id < outGraph.repository.size(); id++) { if (id % 1000000 == 0) { std::cerr << "Processed " << id << std::endl; } NGT::GraphNode &node = graph[id - 1]; try { NGT::GraphNode &onode = *outGraph.getNode(id); bool stop = false; for (size_t rank = 0; (rank < node.size() && rank < originalEdgeSize) && stop == false; rank++) { switch (mode) { case 'a': if (onode.size() >= originalEdgeSize) { stop = true; continue; } break; case 'c': break; } NGT::Distance distance = node[rank].distance; size_t nodeID = node[rank].id; outGraph.addEdge(id, nodeID, distance, false); } } catch(NGT::Exception &err) { std::cerr << "GraphReconstructor: Warning. Cannot get the node. ID=" << id << ":" << err.what() << std::endl; continue; } } originalEdgeTimer.stop(); NGT::GraphIndex::showStatisticsOfGraph(dynamic_cast<NGT::GraphIndex&>(outIndex.getIndex())); std::cerr << "Reconstruction time=" << originalEdgeTimer.time << ":" << reverseEdgeTimer.time << ":" << normalizeEdgeTimer.time << std::endl; std::cerr << "original edge size=" << originalEdgeSize << std::endl; std::cerr << "reverse edge size=" << reverseEdgeSize << std::endl; #endif } }; }; // NGT
/* ==== pkzip_fmt_plug.c ==== */
/* PKZIP patch for john to handle 'old' pkzip passwords (old 'native' format) * * Written by Jim Fougeron <jfoug at cox.net> in 2011. No copyright * is claimed, and the software is hereby placed in the public domain. * In case this attempt to disclaim copyright and place the software in the * public domain is deemed null and void, then the software is * Copyright (c) 2011 Jim Fougeron and it is hereby released to the * general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * There's ABSOLUTELY NO WARRANTY, express or implied. * */ #include "arch.h" #if !AC_BUILT #define HAVE_LIBZ 1 /* legacy build has -lz in LDFLAGS */ #endif #if HAVE_LIBZ #if FMT_EXTERNS_H extern struct fmt_main fmt_pkzip; #elif FMT_REGISTERS_H john_register_one(&fmt_pkzip); #else #include <string.h> #include <zlib.h> #include "common.h" #include "misc.h" #include "formats.h" #define USE_PKZIP_MAGIC 1 #include "pkzip.h" #include "pkzip_inffixed.h" // This file is a data file, taken from zlib #include "loader.h" #ifdef _OPENMP #include <omp.h> #endif #include "memdbg.h" #define FORMAT_LABEL "PKZIP" #define FORMAT_NAME "" #define ALGORITHM_NAME "32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1000 #define PLAINTEXT_LENGTH 31 #define BINARY_SIZE 0 #define BINARY_ALIGN 1 #define SALT_SIZE (sizeof(PKZ_SALT*)) #define SALT_ALIGN (sizeof(ARCH_WORD_32)) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 #ifndef OMP_SCALE #define OMP_SCALE 64 #endif //#define ZIP_DEBUG 1 //#define ZIP_DEBUG 2 /* * It is likely that this should be put into the arch.h files for the different systems, * IF we find a system which operates faster doing the non-table work. * However, in current testing, it is always faster to use the multiply table. It only * takes 16kb, and almost always stays in the cache for any system newer than a 386. 
*/ #define PKZIP_USE_MULT_TABLE #if ARCH_LITTLE_ENDIAN #define KB1 0 #define KB2 3 #else #define KB1 3 #define KB2 0 #endif /* * filename:$pkzip$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*DA]*$/pkzip$ (deprecated) * filename:$pkzip2$C*B*[DT*MT{CL*UL*CR*OF*OX}*CT*DL*CS*TC*DA]*$/pkzip2$ (new format, with 2 checksums) * * All numeric and 'binary data' fields are stored in hex. * * C is the count of hashes present (the array of items, inside the [] C can be 1 to 3.). * B is number of valid bytes in the checksum (1 or 2). Unix zip is 2 bytes, all others are 1 * ARRAY of data starts here (there will be C array elements) * DT is a "Data Type enum". This will be 1 2 or 3. 1 is 'partial'. 2 and 3 are full file data (2 is inline, 3 is load from file). * MT Magic Type enum. 0 is no 'type'. 255 is 'text'. Other types (like MS Doc, GIF, etc), see source. * NOTE, CL, DL, CRC, OFF are only present if DT != 1 * CL Compressed length of file blob data (includes 12 byte IV). * UL Uncompressed length of the file. * CR CRC32 of the 'final' file. * OF Offset to the PK\x3\x4 record for this file data. If DT==2, then this will be a 0, as it is not needed, all of the data is already included in the line. * OX Additional offset (past OF), to get to the zip data within the file. * END OF 'optional' fields. * CT Compression type (0 or 8) 0 is stored, 8 is imploded. * DL Length of the DA data. * CS Checksum from crc32. * TC Checksum from timestamp * DA This is the 'data'. It will be hex data if DT==1 or 2. If DT==3, then it is a filename (name of the .zip file). * END of array items. * The format string will end with $/pkzip$ * * NOTE, after some code testing, it has come to show, that the 'magic' may not be needed, or very useful. The problem with it, is IF the file * ends up NOT starting with any of the magic values, then we will have a false negative, and NEVER be able to crack the zip's password. For now * we have a #define (right before the #include "pkzip.h"). 
If that define is uncommented, then pkzip format will be built with magic logic. * However, right now it is not being built that way. * */ static struct fmt_tests tests[] = { /* compression of a perl file. We have the same password, same file used twice in a row (pkzip, 1 byte checksum). NOTE, pkzip uses random IV, so both encrypted blobs are different */ {"\ $pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*194883130e4c7419bd735c53dec36f0c4b6de6daefea0f507d67ff7256a49b5ea93ccfd9b12f2ee99053ee0b1c9e1c2b88aeaeb6bd4e60094a1ea118785d4ded6dae94\ cade41199330f4f11b37cba7cda5d69529bdfa43e2700ba517bd2f7ff4a0d4b3d7f2559690ec044deb818c44844d6dd50adbebf02cec663ae8ebb0dde05d2abc31eaf6de36a2fc19fda65dd6a7e449f669d1f8c75e9daa0a3f7b\ e8feaa43bf84762d6dbcc9424285a93cedfa3a75dadc11e969065f94fe3991bc23c9b09eaa5318aa29fa02e83b6bee26cafec0a5e189242ac9e562c7a5ed673f599cefcd398617*$/pkzip$", "password" }, {"\ $pkzip$1*1*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*581f798527109cbadfca0b3318435a000be84366caf9723f841a2b13e27c2ed8cdb5628705a98c3fbbfb34552ed498c51a172641bf231f9948bca304a6be2138ab718f\ 6a5b1c513a2fb80c49030ff1a404f7bd04dd47c684317adea4107e5d70ce13edc356c60bebd532418e0855428f9dd582265956e39a0b446a10fd8b7ffb2b4af559351bbd549407381c0d2acc270f3bcaffb275cbe2f628cb09e2\ 978e87cd023d4ccb50caaa92b6c952ba779980d65f59f664dde2451cc456d435188be59301a5df1b1b4fed6b7509196334556c44208a9d7e2d9e237f591d6c9fc467b408bf0aaa*$/pkzip$", "password" }, /* Now the same file, compressed twice, using unix zip (info-zip), with 2 byte checksums */ {"\ $pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\ 73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\ 
3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password" }, {"\ $pkzip$1*2*2*0*e4*1c5*eda7a8de*0*47*8*e4*4bb6*436c9ffa4328870f6272349b591095e1b1126420c3041744650282bc4f575d0d4a5fc5fb34724e6a1cde742192387b9ed749ab5c72cd6bb0206f102e9216538f095fb7\ 73661cfde82c2e2a619332998124648bf4cd0da56279f0c297567d9b5d684125ee92920dd513fd18c27afba2a9633614f75d8f8b9a14095e3fafe8165330871287222e6681dd9c0f830cf5d464457b257d0900eed29107fad8af\ 3ac4f87cf5af5183ff0516ccd9aeac1186006c8d11b18742dfb526aadbf2906772fbfe8fb18798967fd397a724d59f6fcd4c32736550986d227a6b447ef70585c049a1a4d7bf25*$/pkzip$", "password"}, /* now a pkzip archive, with 3 files, 1 byte checksum */ {"\ $pkzip$3*1*1*0*8*24*4001*8986ec4d693e86c1a42c1bd2e6a994cb0b98507a6ec937fe0a41681c02fe52c61e3cc046*1*0*8*24*4003*a087adcda58de2e14e73db0043a4ff0ed3acc6a9aee3985d7cb81d5ddb32b840ea20\ 57d9*2*0*e4*1c5*eda7a8de*0*4c*8*e4*eda7*89a792af804bf38e31fdccc8919a75ab6eb75d1fd6e7ecefa3c5b9c78c3d50d656f42e582af95882a38168a8493b2de5031bb8b39797463cb4769a955a2ba72abe48ee75b103\ f93ef9984ae740559b9bd84cf848d693d86acabd84749853675fb1a79edd747867ef52f4ee82435af332d43f0d0bb056c49384d740523fa75b86a6d29a138da90a8de31dbfa89f2f6b0550c2b47c43d907395904453ddf42a665\ b5f7662de170986f89d46d944b519e1db9d13d4254a6b0a5ac02b3cfdd468d7a4965e4af05699a920e6f3ddcedb57d956a6b2754835b14e174070ba6aec4882d581c9f30*$/pkzip$", "3!files"}, /* following are from CMIYC 2012 */ 
{"$pkzip$1*1*2*0*163*2b5*cd154083*0*26*8*163*cd15*d6b094794b40116a8b387c10159225d776f815b178186e51faf16fa981fddbffdfa22f6c6f32d2f81dab35e141f2899841991f3cb8d53f8ee1f1d85657f7c7a82ebb2d63182803c6beee00e0bf6c72edeeb1b00dc9f07f917bb8544cc0e96ca01503cd0fb6632c296cebe3fb9b64543925daae6b7ea95cfd27c42f6f3465e0ab2c812b9aeeb15209ce3b691f27ea43a7a77b89c2387e31c4775866a044b6da783af8ddb72784ccaff4d9a246db96484e865ea208ade290b0131b4d2dd21f172693e6b5c90f2eb9b67572b55874b6d3a78763212b248629e744c07871a6054e24ef74b6d779e44970e1619df223b4e5a72a189bef40682b62be6fb7f65e087ca6ee19d1ebfc259fa7e3d98f3cb99347689f8360294352accffb146edafa9e91afba1f119f95145738ac366b332743d4ff40d49fac42b8758c43b0af5b60b8a1c63338359ffbff432774f2c92de3f8c49bd4611e134db98e6a3f2cfb148d2b20f75abab6*$/pkzip$", "passwort"}, {"$pkzip$1*1*2*0*163*2b6*46abc149*0*28*8*163*46ab*0f539b23b761a347a329f362f7f1f0249515f000404c77ec0b0ffe06f29140e8fa3e8e5a6354e57f3252fae3d744212d4d425dc44389dd4450aa9a4f2f3c072bee39d6ac6662620812978f7ab166c66e1acb703602707ab2da96bb28033485ec192389f213e48eda8fc7d9dad1965b097fafebfda6703117db90e0295db9a653058cb28215c3245e6e0f6ad321065bf7b8cc5f66f6f2636e0d02ea35a6ba64bbf0191c308098fd836e278abbce7f10c3360a0a682663f59f92d9c2dcfc87cde2aae27ea18a14d2e4a0752b6b51e7a5c4c8c2bab88f4fb0aba27fb20e448655021bb3ac63752fdb01e6b7c99f9223f9e15d71eb1bd8e323f522fc3da467ff0aae1aa17824085d5d6f1cdfc9c7c689cd7cb057005d94ba691f388484cfb842c8775baac220a5490ed945c8b0414dbfc4589254b856aade49f1aa386db86e9fc87e6475b452bd72c5e2122df239f8c2fd462ca54c1a5bddac36918c5f5cf0cc94aa6ee820*$/pkzip$", "Credit11"}, 
{"$pkzip$1*1*2*0*163*2b6*46abc149*0*26*8*163*46ab*7ea9a6b07ddc9419439311702b4800e7e1f620b0ab8535c5aa3b14287063557b176cf87a800b8ee496643c0b54a77684929cc160869db4443edc44338294458f1b6c8f056abb0fa27a5e5099e19a07735ff73dc91c6b20b05c023b3ef019529f6f67584343ac6d86fa3d12113f3d374b047efe90e2a325c0901598f31f7fb2a31a615c51ea8435a97d07e0bd4d4afbd228231dbc5e60bf1116ce49d6ce2547b63a1b057f286401acb7c21afbb673f3e26bc1b2114ab0b581f039c2739c7dd0af92c986fc4831b6c294783f1abb0765cf754eada132df751cf94cad7f29bb2fec0c7c47a7177dea82644fc17b455ba2b4ded6d9a24e268fcc4545cae73b14ceca1b429d74d1ebb6947274d9b0dcfb2e1ac6f6b7cd2be8f6141c3295c0dbe25b65ff89feb62cb24bd5be33853b88b8ac839fdd295f71e17a7ae1f054e27ba5e60ca03c6601b85c3055601ce41a33127938440600aaa16cfdd31afaa909fd80afc8690aaf*$/pkzip$", "7J0rdan!!"}, /* CMIYC 2013 "pro" hard hash */ {"$pkzip$1*2*2*0*6b*73*8e687a5b*0*46*8*6b*0d9d*636fedc7a78a7f80cda8542441e71092d87d13da94c93848c230ea43fab5978759e506110b77bd4bc10c95bc909598a10adfd4febc0d42f3cd31e4fec848d6f49ab24bb915cf939fb1ce09326378bb8ecafde7d3fe06b6013628a779e017be0f0ad278a5b04e41807ae9fc*$/pkzip$", "c00rslit3!"}, /* http://corkami.googlecode.com/files/ChristmasGIFts.zip (fixed with 2 byte checksums from timestamp, using new $pkzip2$ type) */ 
{"$pkzip2$3*2*1*2*8*c0*7224*72f6*6195f9f3401076b22f006105c4323f7ac8bb8ebf8d570dc9c7f13ddacd8f071783f6bef08e09ce4f749af00178e56bc948ada1953a0263c706fd39e96bb46731f827a764c9d55945a89b952f0503747703d40ed4748a8e5c31cb7024366d0ef2b0eb4232e250d343416c12c7cbc15d41e01e986857d320fb6a2d23f4c44201c808be107912dbfe4586e3bf2c966d926073078b92a2a91568081daae85cbcddec75692485d0e89994634c71090271ac7b4a874ede424dafe1de795075d2916eae*1*6*8*c0*26ee*461b*944bebb405b5eab4322a9ce6f7030ace3d8ec776b0a989752cf29569acbdd1fb3f5bd5fe7e4775d71f9ba728bf6c17aad1516f3aebf096c26f0c40e19a042809074caa5ae22f06c7dcd1d8e3334243bca723d20875bd80c54944712562c4ff5fdb25be5f4eed04f75f79584bfd28f8b786dd82fd0ffc760893dac4025f301c2802b79b3cb6bbdf565ceb3190849afdf1f17688b8a65df7bc53bc83b01a15c375e34970ae080307638b763fb10783b18b5dec78d8dfac58f49e3c3be62d6d54f9*2*0*2a*1e*4a204eab*ce8*2c*0*2a*4a20*7235*6b6e1a8de47449a77e6f0d126b217d6b2b72227c0885f7dc10a2fb3e7cb0e611c5c219a78f98a9069f30*$/pkzip2$", "123456"}, {NULL} }; /* these static fields are used in the crypt_all loop, and the cmp_all/cmp_one we */ /* perform the pkzip 'checksum' checking. If we do get a 'hit', then that pass & */ /* salt pair is checked fully within the cmp_exact, where it gets inflated and */ /* checked (possibly also a 'sample TEXT record is done first, as a quick check */ static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static u32 *K12; static PKZ_SALT *salt; static u8 *chk; static int dirty=1; #if USE_PKZIP_MAGIC static ZIP_SIGS SIGS[256]; #endif #ifdef PKZIP_USE_MULT_TABLE static u8 mult_tab[16384]; #define PKZ_MULT(b,w) b^mult_tab[(u16)(w.u)>>2] #else inline u8 PKZ_MULT(u8 b, MY_WORD w) {u16 t = w.u|2; return b ^ (u8)(((u16)(t*(t^1))>>8)); } #endif extern struct fmt_main fmt_pkzip; static const char *ValidateZipContents(FILE *in, long offset, u32 offex, int len, u32 crc); /* Since the pkzip format textual representation is pretty complex, with multiple */ /* 'optional' sections, we have a VERY complete valid. 
Valid will make SURE that */
/* the format is completely valid. Thus, there is little or no error checking later */
/* in the rest of the code. It 'should' not be needed, and is done here. There is */
/* a little error checking later in the file, for some of the file opening stuff, */
/* since the file can change from the time of this 'valid' call, until when the data */
/* is actually read from the file. */
/* */
/* NOTE, we may want to later make a 'prepare()' function, and do all file loading */
/* there, so that we have a 'complete' format line, with the zip data contained. */
/*
 * Full structural validation of a $pkzip$ / $pkzip2$ ciphertext line (layout
 * documented in the large comment near the top of this file).  Returns 1 when
 * the line is well formed, 0 otherwise.  `self` is part of the JtR format API
 * and is not used here.  Walks the '*'-separated fields with strtokm() on a
 * strdup'd copy; all failure paths funnel through Bail so cpkeep is freed.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	c8 *p, *cp, *cpkeep;
	int cnt, data_len, ret=0;
	u32 crc;
	FILE *in;
	const char *sFailStr;
	long offset;
	u32 offex;
	int type;
	int complen = 0;
	int type2 = 0;

	/* accept either signature; type2 selects the newer 2-checksum layout */
	if (strncmp(ciphertext, "$pkzip$", 7)) {
		if (!strncmp(ciphertext, "$pkzip2$", 8))
			type2 = 1;
		else
			return ret;
	}
	/* handle 'chopped' .pot lines */
	if (ldr_isa_pot_source(ciphertext))
		return 1;
	cpkeep = strdup(ciphertext);
	cp = cpkeep;
	/* skip past "$pkzip$" (7 chars); one more for "$pkzip2$" */
	p = &cp[7];
	if (type2)
		++p;
	/* C field: count of hash entries that follow */
	if ((cp = strtokm(p, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
		sFailStr = "Out of data, reading count of hashes field"; goto Bail; }
	sscanf(cp, "%x", &cnt);
	if (cnt < 1 || cnt > MAX_PKZ_FILES) {
		sFailStr = "Count of hashes field out of range"; goto Bail; }
	/* B field: number of valid checksum bytes (single char '0'..'2') */
	if ((cp = strtokm(NULL, "*")) == NULL || cp[0] < '0' || cp[0] > '2' || cp[1]) {
		sFailStr = "Number of valid hash bytes empty or out of range"; goto Bail; }
	/* one iteration per array element in the hash line */
	while (cnt--) {
		/* DT field: 1 = partial data, 2 = inline full data, 3 = data in external .zip file */
		if ((cp = strtokm(NULL, "*")) == NULL || cp[0]<'1' || cp[0]>'3' || cp[1]) {
			sFailStr = "Invalid data enumeration type"; goto Bail; }
		type = cp[0] - '0';
		/* MT field: magic type enum */
		if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
			sFailStr = "Invalid type enumeration"; goto Bail; }
		if (type > 1) {
			/* CL/UL/CR/OF/OX fields are only present when DT != 1 */
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid compressed length"; goto Bail; }
			sscanf(cp, "%x", &complen);
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] ||
			    !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid data length value"; goto Bail; }
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid CRC value"; goto Bail; }
			sscanf(cp, "%x", &crc);
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid offset length"; goto Bail; }
			sscanf(cp, "%lx", &offset);
			/* NOTE(review): same message as the previous field; this one is the OX (extra offset) field */
			if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
				sFailStr = "Invalid offset length"; goto Bail; }
			sscanf(cp, "%x", &offex);
		}
		/* CT field: compression type, '0' (stored) or '8' (deflated) only */
		if ((cp = strtokm(NULL, "*")) == NULL || (cp[0] != '0' && cp[0] != '8') || cp[1]) {
			sFailStr = "Compression type enumeration"; goto Bail; }
		/* DL field: length of the DA data */
		if ((cp = strtokm(NULL, "*")) == NULL || !cp[0] || !ishexlc_oddOK(cp)) {
			sFailStr = "Invalid data length value"; goto Bail; }
		sscanf(cp, "%x", &data_len);
		/* CS field: exactly 4 hex chars */
		if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
			sFailStr = "invalid checksum value"; goto Bail; }
		if (type2) {
			/* TC field (new format only): timestamp checksum, 4 hex chars */
			if ((cp = strtokm(NULL, "*")) == NULL || !ishexlc(cp) || strlen(cp) != 4) {
				sFailStr = "invalid checksum2 value"; goto Bail;}
		}
		/* DA field: hex blob (DT 1/2) or a .zip file name (DT 3) */
		if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
		if (type > 1) {
			if (type == 3) {
				/* NOTE(review): message looks copy-pasted; this actually validates the filename length */
				if ( strlen(cp) != data_len) {
					sFailStr = "invalid checksum value"; goto Bail; }
				in = fopen(cp, "rb"); /* have to open in bin mode for OS's where this matters, DOS/Win32 */
				if (!in) {
					/* this error is listed, even if not in pkzip debugging mode. */
					/* But not if we're just reading old pot lines */
					if (!ldr_in_pot)
						fprintf(stderr, "Error loading a pkzip hash line. The ZIP file '%s' could NOT be found\n", cp);
					return 0;
				}
				sFailStr = ValidateZipContents(in, offset, offex, complen, crc);
				if (*sFailStr) {
					/* this error is listed, even if not in pkzip debugging mode. */
					fprintf(stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
					fclose(in);
					return 0;
				}
				/* dry-run read of the blob, to prove it is readable right now */
				fseek(in, offset+offex, SEEK_SET);
				if (complen < 16*1024) {
					/* simply load the whole blob */
					void *tbuf = mem_alloc(complen);
					if (fread(tbuf, 1, complen, in) != complen) {
						MEM_FREE(tbuf);
						fclose(in);
						return 0;
					}
					data_len = complen;
					MEM_FREE(tbuf);
				}
				fclose(in);
			} else {
				/* 'inline' data. */
				if (complen != data_len) {
					sFailStr = "length of full data does not match the salt len"; goto Bail; }
				if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
					sFailStr = "invalid inline data"; goto Bail; }
			}
		} else {
			/* DT==1: partial data, hex-encoded, 2 chars per byte */
			if (!ishexlc(cp) || strlen(cp) != data_len<<1) {
				sFailStr = "invalid partial data"; goto Bail; }
		}
	}
	/* trailer must be the matching "$/pkzip$" / "$/pkzip2$" and nothing after it */
	if ((cp = strtokm(NULL, "*")) == NULL) goto Bail;
	if (strtokm(NULL, "") != NULL) goto Bail;
	if (type2) ret = !strcmp(cp, "$/pkzip2$");
	else ret = !strcmp(cp, "$/pkzip$");

Bail:;
#ifdef ZIP_DEBUG
	if (!ret) fprintf (stderr, "pkzip validation failed [%s] Hash is %s\n", sFailStr, ciphertext);
#endif
	MEM_FREE(cpkeep);
	return ret;
}

/*
 * Checks that (offset) in the already-open zip file really is the ZIP local
 * file header of the encrypted entry described by the hash line.  Returns ""
 * on success, or a static error-description string on mismatch.
 */
static const char *ValidateZipContents(FILE *fp, long offset, u32 offex, int _len, u32 _crc)
{
	u32 id;
	u16 version, flags, method, modtm, moddt, namelen, exlen;
	u32 crc, complen, uncomplen;

	if (fseek(fp, offset, SEEK_SET) != 0)
		return "Not able to seek to specified offset in the .zip file, to read the zip blob data.";
	/* 0x04034b50 is the little-endian PK\x3\x4 local file header signature */
	id = fget32LE(fp);
	if (id != 0x04034b50U)
		return "Compressed zip file offset does not point to start of zip blob";

	/* Ok, see if this IS the correct file blob. */
	version = fget16LE(fp);
	flags = fget16LE(fp);
	method = fget16LE(fp);
	modtm = fget16LE(fp);
	moddt = fget16LE(fp);
	crc = fget32LE(fp);
	complen = fget32LE(fp);
	uncomplen = fget32LE(fp);
	namelen = fget16LE(fp);
	exlen = fget16LE(fp);

	/* unused vars. */
	(void)uncomplen;
	(void)modtm;
	(void)moddt;

	/* Even if we 'miss', we keep walking back.
We 'can' miss if the CRC of file, or some other */
/* binary data happens to have the 0x04034b50 signature, thus giving us a false local header hit. */
	/* Accept only: matching CRC and compressed length, version 0x14 or 0xA,
	 * flag bit 0 set (presumably the 'encrypted entry' bit of the ZIP spec --
	 * confirm against PKWARE APPNOTE), method stored(0) or deflated(8), and the
	 * extra offset equal to the 30-byte fixed header plus name and extra fields. */
	if (_crc == crc && _len == complen && (0x14 == version || 0xA == version) && (flags & 1) && (method == 8 || method == 0) && offex==30+namelen+exlen)
		return "";
	return "We could NOT find the internal zip data in this ZIP file";
}

/*
 * Copies len bytes of p into a mem_alloc_tiny() buffer (tiny-pool memory is
 * not freed individually here; presumably reclaimed in bulk by JtR -- confirm).
 * Used below to build the magic-signature tables that contain embedded NULs,
 * which str_alloc_copy() could not hold.
 */
static u8 *buf_copy (char *p, int len)
{
	u8 *op = mem_alloc_tiny(len, MEM_ALIGN_NONE);
	memcpy(op, p, len);
	return op;
}

/*
 * JtR format init hook: scales keys-per-crypt for OpenMP, allocates the
 * per-candidate arrays (saved_key, K12, chk), precomputes the 16KB multiply
 * table, and (optionally) fills in the file-magic signature tables.
 */
static void init(struct fmt_main *self)
{
#ifdef PKZIP_USE_MULT_TABLE
	unsigned short n=0;
#endif
#ifdef _OPENMP
	int omp_t;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* one saved plaintext, one 3-word key state (K12), one check byte per candidate */
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	K12 = mem_calloc(sizeof(*K12) * 3, self->params.max_keys_per_crypt);
	chk = mem_calloc(sizeof(*chk), self->params.max_keys_per_crypt);
	/*
	 * Precompute the multiply mangling, within several parts of the hash. There is a pattern,
	 * 64k entries long. However the exact same value is produced 4 times in a row, every
	 * time. Thus, we can build a 16k wide array, and then access the array using this
	 * ((val&0xFFFF) >> 2) This is faster on all current HW, since the 16kb array access
	 * (and the and/shift) is faster than performing the whole mult, 2 shifts, 2 adds and
	 * an and (if the compiler can optimize it to that)
	 *
	 * There is a # define at the top of this file that turns this OFF.
if that define is * not set, then these mult's will be done in the crypt_all and decrypt functions */ #ifdef PKZIP_USE_MULT_TABLE for (n = 0; n < 16384; n++) mult_tab[n] = (((unsigned)(n*4+3) * (n*4+2)) >> 8) & 0xff; #endif #if USE_PKZIP_MAGIC //static char *MagicTypes[]= { "", "DOC", "XLS", "DOT", "XLT", "EXE", "DLL", "ZIP", "BMP", "DIB", "GIF", "PDF", "GZ", "TGZ", "BZ2", "TZ2", "FLV", "SWF", "MP3", NULL }; //static int MagicToEnum[] = {0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7, 7, 8, 8, 9, 10, 11, 0}; // decent sources of these: // http://www.garykessler.net/library/file_sigs.html // http://en.wikipedia.org/wiki/List_of_file_signatures // http://toorcon.techpathways.com/uploads/headersig.txt // not available, 2012-12-28) // archive.org still has a version: // http://web.archive.org/web/20110725085828/http://toorcon.techpathways.com/uploads/headersig.txt // there are many more. //case 1: // DOC/XLS SIGS[1].magic_signature[0] = (u8*)str_alloc_copy("\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1"); SIGS[1].magic_sig_len[0] = 8; SIGS[1].magic_signature[1] = buf_copy("\x50\x4B\x03\x04\x14\x00\x06\x00\x08", 10); // a .zip file 'sort of' SIGS[1].magic_sig_len[1] = 9; SIGS[1].magic_signature[2] = buf_copy("\x09\x04\x06\x00\x00\x00\x10\x00\xF6\x05\x5C\x00", 13); // older XLS format (office 95) SIGS[1].magic_sig_len[2] = 12; SIGS[1].magic_signature[3] = buf_copy("\x09\x02\x06\x00\x00\x00\x10\x00\xB9\x04\x5C\x00", 13); // older XLS v2 SIGS[1].magic_sig_len[3] = 12; SIGS[1].magic_signature[4] = buf_copy("\x50\x4B\x03\x04\x14\x00\x00\x00\x00\x00", 11); //DOC Star Writer 6.0 SIGS[1].magic_sig_len[4] = 10; SIGS[1].magic_signature[5] = buf_copy("\x31\xBE\x00\x00\x00\xAB\x00\x00", 9); //DOC MS Word for DOS v6 File SIGS[1].magic_sig_len[5] = 8; SIGS[1].magic_signature[6] = (u8*)str_alloc_copy("\x12\x34\x56\x78\x90\xFF"); //DOC MS Word 6.0 File SIGS[1].magic_sig_len[6] = 6; SIGS[1].magic_signature[7] = (u8*)str_alloc_copy("\x7F\xFE\x34\x0A"); //MS Word File SIGS[1].magic_sig_len[7] = 4; 
SIGS[1].magic_count = 8; SIGS[1].max_len = 12; //case 2: // Win32/DOS exe file MZ SIGS[2].magic_signature[0] = (u8*)str_alloc_copy("MZ"); SIGS[2].magic_sig_len[0] = 2; SIGS[2].magic_count = 1; SIGS[2].max_len = 2; //case 3: // PKZIP SIGS[3].magic_signature[0] = (u8*)str_alloc_copy("\x50\x4B\x03\x04"); SIGS[3].magic_sig_len[0] = 4; SIGS[3].magic_count = 1; SIGS[3].max_len = 4; //case 4: // BMP SIGS[4].magic_signature[0] = (u8*)str_alloc_copy("BM"); SIGS[4].magic_sig_len[0] = 2; SIGS[4].magic_count = 1; SIGS[4].max_len = 2; //case 5: // GIF SIGS[5].magic_signature[0] = (u8*)str_alloc_copy("GIF87a"); SIGS[5].magic_sig_len[0] = 6; SIGS[5].magic_signature[1] = (u8*)str_alloc_copy("GIF89a"); SIGS[5].magic_sig_len[1] = 6; SIGS[5].magic_count = 2; SIGS[5].max_len = 6; //case 6: // PDF SIGS[6].magic_signature[0] = (u8*)str_alloc_copy("%PDF"); SIGS[6].magic_sig_len[0] = 4; SIGS[6].magic_count = 1; SIGS[6].max_len = 4; //case 7: // GZ SIGS[7].magic_signature[0] = (u8*)str_alloc_copy("\x1F\x8B\x08"); SIGS[7].magic_sig_len[0] = 3; SIGS[7].magic_count = 1; SIGS[7].max_len = 3; //case 8: // BZ2 (there is a 'magic' pi, but byte 4 is 1 to 9, so skip the 'pi') SIGS[8].magic_signature[0] = (u8*)str_alloc_copy("BZh"); SIGS[8].magic_sig_len[0] = 3; SIGS[8].magic_signature[1] = (u8*)str_alloc_copy("BZ0"); SIGS[8].magic_sig_len[1] = 3; SIGS[8].magic_count = 2; SIGS[8].max_len = 3; //case 9: // FLV SIGS[9].magic_signature[0] = (u8*)str_alloc_copy("FLV\x01"); SIGS[9].magic_sig_len[0] = 4; SIGS[9].magic_count = 1; SIGS[9].max_len = 4; //case 10: // SWF SIGS[10].magic_signature[0] = (u8*)str_alloc_copy("FWS"); SIGS[10].magic_sig_len[0] = 5; SIGS[10].magic_count = 1; SIGS[10].max_len = 5; //case 11: // MP3 SIGS[11].magic_signature[0] = (u8*)str_alloc_copy("ID3"); SIGS[11].magic_sig_len[0] = 3; SIGS[11].magic_count = 1; SIGS[11].max_len = 3; SIGS[255].max_len = 64; #endif } static void done(void) { MEM_FREE(chk); MEM_FREE(K12); MEM_FREE(saved_key); } static void set_salt(void *_salt) { salt = 
*((PKZ_SALT**)_salt);
	/* NOTE(review): this statement and the lines below are the tail of the
	 * preceding set_salt-style function; its opening lines fall outside this
	 * chunk. It lazily re-points the per-file hash pointers into the salt's
	 * inline zip_data blob after the dyna_salt framework has copied it. */
	if (salt->H[0].h && salt->H[1].h && salt->H[2].h)
		return;
	// we 'late' fixup the salt.
	salt->H[0].h = &salt->zip_data[0];
	salt->H[1].h = &salt->zip_data[1+salt->H[0].datlen];
	salt->H[2].h = &salt->zip_data[2+salt->H[0].datlen+salt->H[1].datlen];
}

/*
 * Parse a $pkzip$ / $pkzip2$ ciphertext line into a dynamically-sized PKZ_SALT.
 * Returns a pointer to a static buffer holding the psalt pointer (dyna_salt
 * convention), or 0 on file-read failure.
 * NOTE, almost NO error checking at all in this function. Proper error
 * checking done in valid()
 */
static void *get_salt(char *ciphertext)
{
	static union alignment {
		unsigned char c[8];
		ARCH_WORD_32 a[1];
	} a;
	unsigned char *salt_p = a.c;
	PKZ_SALT *salt, *psalt;
	long offset=0;
	char *H[3] = {0,0,0};      /* per-file temp buffers for the hex/file data */
	long ex_len[3] = {0,0,0};  /* byte length of each H[i] buffer */
	u32 offex;
	int i, j;
	c8 *p, *cp, *cpalloc = (char*)mem_alloc(strlen(ciphertext)+1);
	int type2 = 0;             /* non-zero for the $pkzip2$ (two-checksum) format */

	/* Needs word align on REQ_ALIGN systems. May crash otherwise (in the sscanf) */
	salt = mem_calloc(1, sizeof(PKZ_SALT));

	cp = cpalloc;
	strcpy(cp, ciphertext);
	if (!strncmp(cp, "$pkzip$", 7))
		p = &cp[7];
	else {
		p = &cp[8];
		type2 = 1;
	}
	cp = strtokm(p, "*");
	sscanf(cp, "%x", &(salt->cnt));
	cp = strtokm(NULL, "*");
	sscanf(cp, "%x", &(salt->chk_bytes));
	/* one field group per embedded file (up to 3) */
	for(i = 0; i < salt->cnt; ++i) {
		int data_enum;
		cp = strtokm(NULL, "*");
		data_enum = *cp - '0';
		cp = strtokm(NULL, "*");
#if USE_PKZIP_MAGIC
		{
			// mingw can't handle %hhx. Use 'normal' %x and assign back to uint_8 var
			unsigned jnk;
			sscanf(cp, "%x", &jnk);
			salt->H[i].magic = (unsigned char)jnk;
		}
		salt->H[i].pSig = &SIGS[salt->H[i].magic];
#endif
		if (data_enum > 1) {
			/* types 2/3 carry full-blob metadata: sizes, crc, file offsets */
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &(salt->compLen));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &(salt->deCompLen));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &(salt->crc32));
			cp = strtokm(NULL, "*");
			sscanf(cp, "%lx", &offset);
			cp = strtokm(NULL, "*");
			sscanf(cp, "%x", &offex);
		}
		cp = strtokm(NULL, "*");
		sscanf(cp, "%x", &(salt->H[i].compType));
		cp = strtokm(NULL, "*");
		sscanf(cp, "%x", &(salt->H[i].datlen));
		cp = strtokm(NULL, "*");
		/* 4 hex digits -> 16-bit checksum */
		for (j = 0; j < 4; ++j) {
			salt->H[i].c <<= 4;
			salt->H[i].c |= atoi16[ARCH_INDEX(cp[j])];
		}
		if (type2) {
			cp = strtokm(NULL, "*");
			for (j = 0; j < 4; ++j) {
				salt->H[i].c2 <<= 4;
				salt->H[i].c2 |= atoi16[ARCH_INDEX(cp[j])];
			}
		} else
			salt->H[i].c2 = salt->H[i].c; // fake out 2nd hash, by copying first hash
		cp = strtokm(NULL, "*");
		if (data_enum > 1) {
			/* if 2 or 3, we have the FULL zip blob for decrypting. */
			if (data_enum == 3) {
				/* read from file. */
				FILE *fp;
				fp = fopen(cp, "rb");
				if (!fp) {
					fprintf (stderr, "Error opening file for pkzip data: %s\n", cp);
					MEM_FREE(cpalloc);
					return 0;
				}
				fseek(fp, offset+offex, SEEK_SET);
				if (salt->compLen < 16*1024) {
					/* simply load the whole blob */
					ex_len[i] = salt->compLen;
					H[i] = mem_alloc(salt->compLen);
					if (fread(H[i], 1, salt->compLen, fp) != salt->compLen) {
						fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						MEM_FREE(cpalloc);
						return 0;
					}
					fclose(fp);
					salt->H[i].datlen = salt->compLen;
				}
				else {
					/* Only load a small part (to be used in crypt_all), and set the filename in */
					/* the salt->fname string, so that cmp_all can open the file, and buffered */
					/* read the zip data only when it 'needs' it. */
					strnzcpy(salt->fname, (const char *)cp, sizeof(salt->fname));
					salt->offset = offset+offex;
					ex_len[i] = 384;
					H[i] = mem_alloc(384);
					if (fread(H[i], 1, 384, fp) != 384) {
						fprintf (stderr, "Error reading zip file for pkzip data: %s\n", cp);
						fclose(fp);
						MEM_FREE(cpalloc);
						return 0;
					}
					fclose(fp);
					salt->H[i].datlen = 384;
				}
			} else {
				/* data_enum == 2: blob is inline hex in the ciphertext */
				ex_len[i] = salt->compLen;
				H[i] = mem_alloc(salt->compLen);
				for (j = 0; j < salt->H[i].datlen; ++j)
					H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
			}
			/* we also load this into the 'building' salt */
			salt->compType = salt->H[i].compType;
			/* Now, set the 'is full zip' flag, so we later process as a zip file. */
			salt->H[i].full_zip = 1;
			salt->full_zip_idx = i;
		} else {
			/* checksum-only entry: just the datlen hex bytes */
			ex_len[i] = salt->H[i].datlen;
			H[i] = mem_alloc(salt->H[i].datlen);
			for (j = 0; j < salt->H[i].datlen; ++j)
				H[i][j] = (atoi16[ARCH_INDEX(cp[j*2])]<<4) + atoi16[ARCH_INDEX(cp[j*2+1])];
		}
	}
	MEM_FREE(cpalloc);

	// Ok, we want to add some 'logic' to remove the magic testing, except for specific cases.
	// If the only file blobs we have are stored, and long blobs, then we want magic (3 file, 2 byte checksum does not need magic).
	// A single 1 byte file, even if deflated, we want to keep magic. (possibly).
	j = 0;
	for (i = 0; i < salt->cnt; ++i) {
		if (salt->H[i].compType == 8) {
			if (salt->cnt == 1 && salt->chk_bytes == 1)
				j += 10;
			else
				break;
		}
		j += 1;
	}
	// ok, if j == 1, then we 'might' want to use magic. Otherwise, we want to 'clear' all magic values.
	if (j >= 20)
		j = 0;
	if (j && salt->chk_bytes == 2 && salt->cnt > 1)
		j = 0; // we do not need to use magic, on 2 or 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (j && salt->chk_bytes == 1 && salt->cnt == 3)
		j = 0; // we do not need to use magic, on 3 stored 2 byte checksum files. We already have 2^32 or 2^48 in the checksum checking
	if (!j) {
		for (i = 0; i < salt->cnt; ++i)
			salt->H[i].magic = 0; // remove any 'magic' logic from this hash.
	}

	/* Build the final variable-length salt: struct + all blobs packed into
	 * zip_data with 1-byte gaps (the late-fixup code above relies on the same
	 * +1 / +2 offsets when re-pointing H[i].h). */
	psalt = mem_calloc(1, sizeof(PKZ_SALT) + ex_len[0]+ex_len[1]+ex_len[2]+2);
	memcpy(psalt, salt, sizeof(*salt));
	memcpy(psalt->zip_data, H[0], ex_len[0]);
	MEM_FREE(H[0]);
	if(salt->cnt > 1)
		memcpy(psalt->zip_data+ex_len[0]+1, H[1], ex_len[1]);
	MEM_FREE(H[1]);
	if(salt->cnt > 2)
		memcpy(psalt->zip_data+ex_len[0]+ex_len[1]+2, H[2], ex_len[2]);
	MEM_FREE(H[2]);
	MEM_FREE(salt);

	psalt->dsalt.salt_alloc_needs_free = 1; // we used mem_calloc, so JtR CAN free our pointer when done with them.

	// set the JtR core linkage stuff for this dyna_salt
	memcpy(salt_p, &psalt, sizeof(psalt));
	psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(PKZ_SALT, cnt);
	psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(PKZ_SALT, cnt, full_zip_idx, ex_len[0]+ex_len[1]+ex_len[2]+2);

	return salt_p;
}

/* Store the candidate password; dirty=1 forces crypt_all to recompute keys. */
static void set_key(char *key, int index)
{
	/* Keep the PW, so we can return it in get_key if asked to do so */
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
	dirty = 1;
}

/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}

/* chk[idx] was set by crypt_all: 1 == all quick checksums matched. */
static int cmp_one(void *binary, int idx)
{
	return chk[idx] == 1;
}

/* Non-zero if ANY candidate in this batch passed the checksum filter. */
static int cmp_all(void *binary, int count)
{
	int i,j;
	for (i=j=0; i<count; ++i)
		j+=chk[i]; /* hopefully addition like this is faster than 'count' conditional if statements */
	return j;
}

/* this function is used by cmp_exact_loadfile. It will load the next
 * part of the file then decrypt the data, and return just how many
 * bytes were loaded.
 *
 * This function is 'similar' to an fread(). However, it also decrypts data
 */
static int get_next_decrypted_block(u8 *in, int sizeof_n, FILE *fp, u32 *inp_used, MY_WORD *pkey0, MY_WORD *pkey1, MY_WORD *pkey2)
{
	u32 new_bytes = sizeof_n, k;
	u8 C;

	/* we have read all the bytes, we're done */
	if (*inp_used >= salt->compLen)
		return 0;
	if (*inp_used + new_bytes > salt->compLen)
		/* this is the last block. Only load the bytes that are left */
		new_bytes = salt->compLen - *inp_used;
	/* return the correct 'offset', so we can track when the file buffer has been fully read */
	*inp_used += new_bytes;
	/* read the data */
	if (fread(in, 1, new_bytes, fp) != new_bytes)
		return 0;

	/* decrypt the data bytes (in place, in same buffer). Easy to do, only requires 1 temp character variable. */
	/* this is the standard PKWARE 'traditional' stream-cipher key update */
	for (k = 0; k < new_bytes; ++k) {
		C = PKZ_MULT(in[k],(*pkey2));
		pkey0->u = jtr_crc32 (pkey0->u, C);
		pkey1->u = (pkey1->u + pkey0->c[KB1]) * 134775813 + 1;
		pkey2->u = jtr_crc32 (pkey2->u, pkey1->c[KB2]);
		in[k] = C;
	}
	/* return the number of bytes we read from the file on this read */
	return new_bytes;
}

/* Ok, this is the more complex example. Here we have to load the file (which may be HUGE)
 * decrypt the bytes from this file, and then inflate that data, and crc the bytes which we
 * have inflated from that stream. Then in the end, when we use all input bytes, if we have
 * inflated the right amount of data, ended up with a Z_STREAM_END, and the proper sized
 * decompression buffer, and the CRC matches, then we know we have the correct password
 *
 * This function is called from cmp_exact(), when cmp_exact finds out we have to decrypt from
 * the stored .zip file.
 *
 * this code is modifications made to the zpipe.c 'example' code from the zlib web site.
*/
#define CHUNK (64*1024)

/* Full validation of one candidate against an on-disk zip: seek to the blob,
 * decrypt, inflate (unless stored), and compare length + CRC32.
 * Returns 1 on match (or on I/O failure, deliberately erring toward "found"). */
static int cmp_exact_loadfile(int index)
{
	int ret;
	u32 have, k;
	z_stream strm;
	unsigned char in[CHUNK];
	unsigned char out[CHUNK];
	FILE *fp;
	MY_WORD key0, key1, key2;
	u8 *b, C;
	u32 inp_used, decomp_len=0;
	u32 crc = 0xFFFFFFFF;

	/* Open the zip file, and 'seek' to the proper offset of the binary zip blob */
	fp = fopen(salt->fname, "rb");
	if (!fp) {
		/* NOTE(review): message text is garbled ("We are a possible password...")
		 * in the original; kept byte-identical here. */
		fprintf (stderr, "\nERROR, the zip file: %s has been removed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		return 1;
	}
	if (fseek(fp, salt->offset, SEEK_SET)) {
		fprintf (stderr, "\nERROR, the zip file: %s fseek() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		fclose(fp);
		return 1;
	}

	/* 'seed' the decryption with the IV. We do NOT use these bytes, they simply seed us. */
	key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];
	k=12;
	/* the fread only advances the file position past the 12 IV bytes; the key
	 * schedule below consumes the cached copy of those bytes from the salt blob */
	if (fread(in, 1, 12, fp) != 12) {
		fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
		fclose(fp);
		return 1;
	}
	b = salt->H[salt->full_zip_idx].h;
	do {
		C = PKZ_MULT(*b++,key2);
		key0.u = jtr_crc32 (key0.u, C);
		key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
		key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
	} while(--k);

	/* this is 'sort of' our file pointer. It is the 'index' into the file's encrypted, compressed data buffer. */
	/* we have read the 12 bytes of IV data, and updated our keys. Now we start processing the rest of the bytes */
	/* to get the data to inflate, and crc check */
	inp_used = 12;

	if (salt->H[salt->full_zip_idx].compType == 0) {
		// handle a stored blob (we do not have to decrypt it.
		int avail_in;
		crc = 0xFFFFFFFF;
		avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		while (avail_in) {
			for (k = 0; k < avail_in; ++k)
				crc = jtr_crc32(crc,in[k]);
			avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		}
		fclose(fp);
		return ~crc == salt->crc32;
	}

	/* allocate inflate state */
	strm.zalloc = Z_NULL;
	strm.zfree = Z_NULL;
	strm.opaque = Z_NULL;
	strm.avail_in = 0;
	strm.next_in = Z_NULL;
	ret = inflateInit2(&strm, -15);
	if (ret != Z_OK)
		/* if zlib is hosed, then likely there is no reason at all to continue. Better to exit, and let the user 'fix' the system */
		perror("Error, initializing the libz inflateInit2() system\n");

	/* decompress until deflate stream ends or end of file */
	do {
		strm.avail_in = get_next_decrypted_block(in, CHUNK, fp, &inp_used, &key0, &key1, &key2);
		if (ferror(fp)) {
			inflateEnd(&strm);
			fclose(fp);
			fprintf (stderr, "\nERROR, the zip file: %s fread() failed.\nWe are a possible password has been found, but FULL validation can not be done!\n", salt->fname);
			return 1;
		}
		if (strm.avail_in == 0)
			break;
		strm.next_in = in;

		/* run inflate() on input until output buffer not full */
		do {
			strm.avail_out = CHUNK;
			strm.next_out = out;
			ret = inflate(&strm, Z_NO_FLUSH);
			switch (ret) {
			case Z_NEED_DICT:
			case Z_DATA_ERROR:
			case Z_MEM_ERROR:
				inflateEnd(&strm);
				fclose(fp);
				return 0;
			}
			have = CHUNK - strm.avail_out;
			/* now update our crc value */
			for (k = 0; k < have; ++k)
				crc = jtr_crc32(crc,out[k]);
			decomp_len += have;
		} while (strm.avail_out == 0);

		/* done when inflate() says it's done */
	} while (ret != Z_STREAM_END);

	/* clean up and return */
	inflateEnd(&strm);
	fclose(fp);
	/* match requires: clean stream end, all input consumed, exact sizes, CRC hit */
	return ret == Z_STREAM_END && inp_used == salt->compLen && decomp_len == salt->deCompLen && salt->crc32 == ~crc;
}

/* Definitive password check: fully decrypt (and inflate, unless stored) the
 * zip blob and verify CRC32 + lengths. Falls back to cmp_exact_loadfile()
 * when only a partial blob plus a filename was kept in the salt. */
static int cmp_exact(char *source, int index)
{
	const u8 *b;
	u8 C, *decompBuf, *decrBuf, *B;
	u32 k, crc;
	MY_WORD key0, key1, key2;
	z_stream strm;
	int ret;

	if (salt->H[salt->full_zip_idx].full_zip == 0)
		/* we do not have a zip file, this is 'checksum' only
		 * POSSIBLY, we should log and output to screen that
		 * we are not 100% 'sure' we have the right password!! */
		return 1;

#ifdef ZIP_DEBUG
	fprintf(stderr, "FULL zip test being done. (pass=%s)\n", saved_key[index]);
#endif

	if (salt->fname[0] == 0) {
		/* we have the whole zip blob in memory, simply allocate a decrypt buffer, decrypt
		 * in one step, crc and be done with it. This is the 'trivial' type. */
		decrBuf = mem_alloc(salt->compLen-12);

		key0.u = K12[index*3], key1.u = K12[index*3+1], key2.u = K12[index*3+2];

		/* burn the 12 IV bytes through the key schedule (output discarded) */
		b = salt->H[salt->full_zip_idx].h;
		k=12;
		do {
			C = PKZ_MULT(*b++,key2);
			key0.u = jtr_crc32 (key0.u, C);
			key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
			key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
		} while(--k);
		/* now decrypt the payload into decrBuf */
		B = decrBuf;
		k = salt->compLen-12;
		do {
			C = PKZ_MULT(*b++,key2);
			key0.u = jtr_crc32 (key0.u, C);
			*B++ = C;
			key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1;
			key2.u = jtr_crc32 (key2.u, key1.c[KB2]);
		} while (--k);

		if (salt->H[salt->full_zip_idx].compType == 0) {
			// handle a stored blob (we do not have to decrypt it.
			crc = 0xFFFFFFFF;
			for (k = 0; k < salt->compLen-12; ++k)
				crc = jtr_crc32(crc,decrBuf[k]);
			MEM_FREE(decrBuf);
			return ~crc == salt->crc32;
		}

		strm.zalloc = Z_NULL;
		strm.zfree = Z_NULL;
		strm.opaque = Z_NULL;
		strm.next_in = Z_NULL;
		strm.avail_in = 0;

		ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */
		if (ret != Z_OK)
			perror("Error, initializing the libz inflateInit2() system\n");

		decompBuf = mem_alloc(salt->deCompLen);

		strm.next_in = decrBuf;
		strm.avail_in = salt->compLen-12;
		strm.avail_out = salt->deCompLen;
		strm.next_out = decompBuf;

		ret = inflate(&strm, Z_SYNC_FLUSH);
		inflateEnd(&strm);
		if (ret != Z_STREAM_END || strm.total_out != salt->deCompLen) {
			MEM_FREE(decompBuf);
			MEM_FREE(decrBuf);
			return 0;
		}

		crc = 0xFFFFFFFF;
		for (k = 0; k < strm.total_out; ++k)
			crc = jtr_crc32(crc,decompBuf[k]);

		MEM_FREE(decompBuf);
		MEM_FREE(decrBuf);
		return ~crc == salt->crc32;
	}
	/* we have a stand alone function to handle this more complex method of
	 * loading from file, decrypting, decompressing, and crc'ing the data
	 * It is complex enough of a task, to have its own function. */
	return cmp_exact_loadfile(index);
}

#if USE_PKZIP_MAGIC
/* extra-byte count for a UTF-8 lead byte, indexed by (lead & 0x3f) for leads >= 0xC0 */
const char exBytesUTF8[64] = {
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,
	3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};

/* Validate one UTF-8 sequence starting at 'source' within 'length' bytes.
 * Returns the sequence length (>=1) if legal, -1 if illegal/truncated. */
static int isLegalUTF8_char(const u8 *source, int length)
{
	u8 a;
	int len;
	const u8 *srcptr;

	if (*source < 0xC0)
		return 1;
	len = exBytesUTF8[*source&0x3f];
	srcptr = source+len;
	if (len+1 > length)
		return -1;
	switch (len) {
	default:
		return -1;
		/* Everything else falls through when "true"... (intentional fallthrough) */
	case 4:
		if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
	case 3:
		if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;
	case 2:
		if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return -1;

		switch (*source) {
		/* no fall-through in this inner switch */
		case 0xE0:
			if (a < 0xA0) return -1;
			break;
		case 0xED:
			if (a > 0x9F) return -1;
			break;
		case 0xF0:
			if (a < 0x90) return -1;
			break;
		case 0xF4:
			if (a > 0x8F) return -1;
		}

	case 1:
		if (*source >= 0x80 && *source < 0xC2) return -1;
	}
	if (*source > 0xF4)
		return -1;
	return len+1;
}

/* Heuristic "does this decrypted data look like text?" filter used by the
 * magic==255 signature check. Accepts ASCII, UTF-8 and BOM-prefixed
 * UTF-16/UTF-32 data; returns 1 for plausible text, 0 otherwise. */
static int validate_ascii(const u8 *out, int inplen)
{
	int i;
	int unicode=0;  /* bytes to skip per char once a UTF-16/32 BOM is seen */

	for (i = 0; i < inplen-1; ++i) {
		if (out[i] > 0x7E) {
			// first check to 'see' if this is a valid utf8 character. If so, let it 'pass'.
			if (unicode)
				return 0; // in unicode mode, we ONLY handle 'ascii' bytes in the low byte.
			if (out[i] > 0xC0) {
				int len;
				if(i > inplen-4)
					return 1;
				len = isLegalUTF8_char(&out[i], 5);
				if (len < 0) return 0;
				i += (len-1);
			} else {
				if (i) {
					// check for utf8 BOM \xEF \xBB \xBF
					if (out[0] == 0xEF && out[1] == 0xBB && out[2] == 0xBF) {
						i = 2;
						continue;
					}
					/* check for Unicode BOM (FF FE for utf16le, FE FF for utf16be, FF FE 00 00 for utf32le, not sure if 00 00 FE FF is utf32be, but likely is) */
					if (out[0] == 0xFF && out[1] == 0xFE) {
						unicode = 1;
						i++;
						continue;
					}
					/* unicode BE bom */
					if (out[0] == 0xFE && out[1] == 0xFF) {
						unicode = 1;
						i += 2;
						continue;
					}
					/* utf32 LE */
					if (out[0] == 0xFF && out[1] == 0xFE && out[2] == 0 && out[3] == 0) {
						unicode = 3;
						i += 3;
						continue;
					}
					/* utf32 BE bom */
					if (out[0] == 0 && out[1] == 0 && out[2] == 0xFE && out[3] == 0xFF) {
						unicode = 3;
						i += 6;
						continue;
					}
					// allow a 'single' byte > 0x7E as long as bytes following are ascii.
					if (out[1] <= 0x7E && out[1] >= 0x20) {
						++i;
						continue;
					}
					return 0;
				}
			}
		} else if (out[i] < 0x20) {
			/* we do not need to deal with DOS EOF char 0x1a, since we will never have the 'end' of the file */
			/* we do allow the ESC character for ANSI files, however, they are frequently also binary, so will fail in other places */
			if (out[i]!='\n' && out[i]!='\r' && out[i]!='\t' && out[i]!=0x1B)
				return 0;
		}
		i += unicode; // skip the null bytes
	}
	return 1;
}

/* Return 1 if the decrypted prefix 'p' matches ANY of the known file
 * signatures in pSig, 0 otherwise. */
static int CheckSigs(const u8 *p, int len, ZIP_SIGS *pSig)
{
	int i, j;

	for (i = 0; i < pSig->magic_count; ++i) {
		int fnd = 1;
		u8 *pS = pSig->magic_signature[i];
		for (j = 0; j < pSig->magic_sig_len[i]; ++j) {
			if (p[j] != pS[j]) {
				fnd = 0;
				break;
			}
		}
		if (fnd)
			return 1;
	}
	return 0;
}
#endif

/* note, Buf is the 'full' decrypted zip buffer (len bytes long). It DOES contain the first 3 bits, which have already
 * been decoded, and have told us we had a code 2 (var table block)
 * all done without BITS(), PULLBYTE(), BITSNEEDED() macros. We 'know' the data we need, and we know that we have
 * 'enough', so we do not worry about all of the overhead, and validation logic.
 *
 * In testing, this function catches ALL bad decryptions, except about 1/300 to 1/350. So, it is not too bad.
 */
MAYBE_INLINE static int check_inflate_CODE2(u8 *next)
{
	u32 bits, hold, thisget, have, i;
	int left;
	u32 ncode;
	u32 ncount[2]; // ends up being an array of 8 u8 count values. But we can clear it, and later 'check' it with 2 u32 instructions.
	u8 *count; // this will point to ncount array. NOTE, this is alignment required 'safe' for Sparc systems or others requiring alignment.

#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
	// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
	hold = *((u32*)next);
#else
	hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
	next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
	hold >>= 3; // we already processed 3 bits
	count = (u8*)ncount;

	if (257+(hold&0x1F) > 286)
		return 0; // nlen, but we do not use it.
	hold >>= 5;
	if(1+(hold&0x1F) > 30)
		return 0; // ndist, but we do not use it.
	hold >>= 5;
	ncode = 4+(hold&0xF);
	hold >>= 4;

	// we have 15 bits left.
	hold += ((u32)(*++next)) << 15;
	hold += ((u32)(*++next)) << 23;
	// we now have 31 bits. We need to know this for the loop below.
	bits = 31;

	// We have 31 bits now, in accum. If we are processing 19 codes, we do 7, then have 10 bits.
	// Add 16 more and have 26, then use 21, have 5. Then load 16 more, then eat 15 of them.
	have = 0;

	ncount[0] = ncount[1] = 0;
	/* tally the 3-bit code-length codes into count[0..7] */
	for (;;) {
		if (have+7>ncode)
			thisget = ncode-have;
		else
			thisget = 7;
		have += thisget;
		bits -= thisget*3;
		while (thisget--) {
			++count[hold&7];
			hold>>=3;
		}
		if (have == ncode)
			break;
		hold += ((u32)(*++next)) << bits;
		bits += 8;
		hold += ((u32)(*++next)) << bits;
		bits += 8;
	}
	count[0] = 0;
	if (!ncount[0] && !ncount[1])
		return 0; /* if no codes at all, then simply bail, that is invalid */

	/* check for an over-subscribed or incomplete set of lengths */
	/* this will catch about 319 out of 320 'bad' passwords that */
	/* have made it into this function. Note, only 1/4 of the */
	/* passwords which pass the checksum, can make it here. Of */
	/* those, we drop 319/320 or about that many (a good check!) */
	left = 1;
	for (i = 1; i <= 7; ++i) {
		left <<= 1;
		left -= count[i];
		if (left < 0)
			return 0; /* over-subscribed */
	}
	if (left > 0)
		return 0; /* incomplete set */

	return 1; /* Passed this check! */
}

//static code const * const lcode = lenfix;
//static code const * const dcode = distfix;

/* This function handles inflate CODE type 1. This is a 'fixed' table code. We set the fixed table, */
/* and then inflate some data (without writing anything. If we find any BAD lookback data, we can */
/* return a failure. We have 24 bytes of inflate data, and this almost always is more than enough */
/* to turn up an error. If we find we need more, we will do more than 24 */
MAYBE_INLINE static int check_inflate_CODE1(u8 *next, int left)
{
	u32 whave = 0, op, bits, hold,len;
	code here;

#if (ARCH_LITTLE_ENDIAN==1) && (ARCH_ALLOWS_UNALIGNED==1)
	// 'speedup' for x86 type systems. pkzip/inflate was designed here, so why not use it.
	hold = *((u32*)next);
#else
	hold = *next + (((u32)next[1])<<8) + (((u32)next[2])<<16) + (((u32)next[3])<<24);
#endif
	next += 3; // we pre-increment when pulling it in the loop, thus we need to be 1 byte back.
	left -= 4;
	hold >>= 3; // we already processed 3 bits
	bits = 32-3;
	for (;;) {
		if (bits < 15) {
			if (left < 2)
				return 1; // we are out of bytes. Return we had no error.
			left -= 2;
			hold += (u32)(*++next) << bits;
			bits += 8;
			hold += (u32)(*++next) << bits;
			bits += 8;
		}
		here=lenfix[hold & 0x1FF];
		op = (unsigned)(here.bits);
		hold >>= op;
		bits -= op;
		op = (unsigned)(here.op);
		if (op == 0)
			/* literal */
			++whave;
		else if (op & 16) {
			/* length base */
			len = (unsigned)(here.val);
			op &= 15; /* number of extra bits */
			if (op) {
				if (bits < op) {
					if (!left)
						return 1; /*we are out of bytes. Return we had no error.*/
					--left;
					hold += (u32)(*++next) << bits;
					bits += 8;
				}
				len += (unsigned)hold & ((1U << op) - 1);
				hold >>= op;
				bits -= op;
			}
			if (bits < 15) {
				if (left < 2)
					return 1; /*we are out of bytes. Return we had no error.*/
				left -= 2;
				hold += (u32)(*++next) << bits;
				bits += 8;
				hold += (u32)(*++next) << bits;
				bits += 8;
			}
			here = distfix[hold & 0x1F];
//		dodist:   (label commented out -- no longer used, see note below)
			op = (unsigned)(here.bits);
			hold >>= op;
			bits -= op;
			op = (unsigned)(here.op);
			if (op & 16) {
				/* distance base */
				u32 dist = (unsigned)(here.val);
				op &= 15; /* number of extra bits */
				if (bits < op) {
					if (!left)
						return 1; /*we are out of bytes. Return we had no error.*/
					--left;
					hold += (u32)(*++next) << bits;
					bits += 8;
					if (bits < op) {
						if (!left)
							return 1; /*we are out of bytes. Return we had no error.*/
						--left;
						hold += (u32)(*++next) << bits;
						bits += 8;
					}
				}
				dist += (unsigned)hold & ((1U << op) - 1);
				if (dist > whave)
					return 0; /*invalid distance too far back*/
				hold >>= op;
				bits -= op;
				//***** start of patched code from Pavel Semjanov (see original code below)
				whave += len;
			}
			else
				return 0; /*invalid distance code*/
		}
		else if (op & 32) {
			// end of block [may present in short sequences, but only at the end.] NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
			if (left == 0)
				return 1;
			return 0;
		}
		else {
			return 0; // invalid literal/length code.
		}
		//***** End of patched code from Pavel
	}
}

// original code block (for above), prior to patch from Pavel Semjanov [pavel@semjanov.com]
// this code would be a direct drop in between the comments starting and stopping with //***** above
// also the dodist label was commented out (no longer used).
#if 0
				whave += dist;
			}
			else if ((op & 64) == 0) {
				/* 2nd level distance code */
				here = distfix[here.val + (hold & ((1U << op) - 1))];
				goto dodist;
			}
			else
				return 0; /*invalid distance code*/
		}
		else if (op & 64) {
			// 2nd level length code.
			//here = lcode[here.val + (hold & ((1U << op) - 1))];
			//goto dolen;
			// this causes an infinite loop. Also, I VERY seriously doubt, this will EVER happen in the first
			// 24 bytes of code. NOTE, there may be problems, in the fact this causes a inf loop!, but for now,
			// simply return 0, then debug later.
			return 0;
		}
		else if (op & 32) {
			// end of block NOTE, we need to find out if we EVER hit the end of a block, at only 24 bytes???
			// It is VERY likely we do SHOULD NOT EVER hit this. If that is the case, return that this block is bogus.
			// check next OP (if we have enough bits left), if CODE=3, fail. If code==0, check
			return 0;
		}
		else {
			return 0; // invalid literal/length code.
		}
#endif

/*
 * Crypt_all simply performs the checksum .zip validation of the data. It performs
If any of them fail to match, then crypt all puts the * complement of the 'proper' checksum of the first hash into the output. These 2 bytes * are checked against the binary for this salt/password combination. Thus, if any * checksum fails, it will never match binary. However, if ALL of the checksums match * we then put the checksum bytes from the first hash, into our output data. Then, when * the binary check (cmp_all, cmp_one) is performed, it WILL match. NOTE, this does * not mean we have found the password. Just that all hashes quick check checksums * for this password 'work'. */ static int crypt_all(int *pcount, struct db_salt *_salt) { const int _count = *pcount; int idx; #if (ZIP_DEBUG==2) static int CNT, FAILED, FAILED2; ++CNT; #endif // pkzip kinda sucks a little for multi-threading, since there is different amount of work to be // done, depenging upon the password. Thus, we pack in OMP_MOD passwords into each thread, and // hopefully some of the differnces will even themselves out in the end. If we have 2 threads // then thread 1 gets 0 to 127 password, and thread 2 gets 128-256. Once they 'get' their data, // there should be no mutexing of the runtime data, thus the threads should run fast. // Also, since we have 'multiple' files in a .zip file (and multiple checksums), we bail as at the // first time we fail to match checksum. So, there may be some threads which check more checksums. // Again, hopefully globbing many tests into a threads working set will flatten out these differences. #ifdef _OPENMP #pragma omp parallel for private(idx) #endif for (idx = 0; idx < _count; ++idx) { int cur_hash_count = salt->cnt; int cur_hash_idx = -1; MY_WORD key0, key1, key2; u8 C; const u8 *b; u8 curDecryBuf[256]; #if USE_PKZIP_MAGIC u8 curInfBuf[128]; #endif int k, SigChecked; u16 e, e2, v1, v2; z_stream strm; int ret; /* use the pwkey for each hash. We mangle on the 12 bytes of IV to what was computed in the pwkey load. 
*/ if (dirty) { u8 *p = (u8*)saved_key[idx]; /* load the 'pwkey' one time, put it into the K12 array */ key0.u = 0x12345678UL; key1.u = 0x23456789UL; key2.u = 0x34567890UL; do { key0.u = jtr_crc32 (key0.u, *p++); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); } while (*p); K12[idx*3] = key0.u, K12[idx*3+1] = key1.u, K12[idx*3+2] = key2.u; goto SkipKeyLoadInit; } do { // 2nd, and later times through the loop, AND if keys are not dirty (i.e. multiple salts // for the same key load), we do NOT perform the key compute, but instead load the pre-computed // key data from the array. key0.u = K12[idx*3], key1.u = K12[idx*3+1], key2.u = K12[idx*3+2]; SkipKeyLoadInit:; b = salt->H[++cur_hash_idx].h; k=11; e = salt->H[cur_hash_idx].c; e2 = salt->H[cur_hash_idx].c2; do { C = PKZ_MULT(*b++,key2); key0.u = jtr_crc32 (key0.u, C); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); } while(--k); /* if the hash is a 2 byte checksum type, then check that value first */ /* There is no reason to continue if this byte does not check out. */ if (salt->chk_bytes == 2 && C != (e&0xFF) && C != (e2&0xFF)) goto Failed_Bailout; C = PKZ_MULT(*b++,key2); #if 1 // https://github.com/magnumripper/JohnTheRipper/issues/467 // Fixed, JimF. Added checksum test for crc32 and timestamp. if (C != (e>>8) && C != (e2>>8)) goto Failed_Bailout; #endif // Now, update the key data (with that last byte. key0.u = jtr_crc32 (key0.u, C); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); // Ok, we now have validated this checksum. We need to 'do some' extra pkzip validation work. // What we do here, is to decrypt a little data (possibly only 1 byte), and perform a single // 'inflate' check (if type is 8). If type is 0 (stored), and we have a signature check, then // we do that here. 
Also, if the inflate code is a 0 (stored block), and we do sig check, then // we can do that WITHOUT having to call inflate. however, if there IS a sig check, we will have // to call inflate on 'some' data, to get a few bytes (or error code). Also, if this is a type // 2 or 3, then we do the FULL inflate, CRC check here. e = 0; // First, we want to get the inflate CODE byte (the first one). C = PKZ_MULT(*b++,key2); SigChecked = 0; if ( salt->H[cur_hash_idx].compType == 0) { // handle a stored file. // We can ONLY deal with these IF we are handling 'magic' testing. #if USE_PKZIP_MAGIC // Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate. if (salt->H[cur_hash_idx].pSig->max_len) { int len = salt->H[cur_hash_idx].pSig->max_len; if (len > salt->H[cur_hash_idx].datlen-12) len = salt->H[cur_hash_idx].datlen-12; SigChecked = 1; curDecryBuf[0] = C; for (; e < len;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(&curDecryBuf[5], len-5)) goto Failed_Bailout; } else { if (!CheckSigs(curDecryBuf, len, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif continue; } #if 1 // https://github.com/magnumripper/JohnTheRipper/issues/467 // Ok, if this is a code 3, we are done. // Code moved to after the check for stored type. (FIXED) This check was INVALID for a stored type file. if ( (C & 6) == 6) goto Failed_Bailout; #endif if ( (C & 6) == 0) { // Check that checksum2 is 0 or 1. If not, I 'think' we can be done if (C > 1) goto Failed_Bailout; // now get 4 bytes. This is the length. It is made up of 2 16 bit values. // these 2 values are checksumed, so it is easy to tell if the data is WRONG. 
// correct data is u16_1 == (u16_2^0xFFFF) curDecryBuf[0] = C; for (e = 0; e <= 4; ) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } v1 = curDecryBuf[1] | (((u16)curDecryBuf[2])<<8); v2 = curDecryBuf[3] | (((u16)curDecryBuf[4])<<8); if (v1 != (v2^0xFFFF)) goto Failed_Bailout; #if USE_PKZIP_MAGIC // Ok, if we have a signature, check it here, WITHOUT having to call zLib's inflate. if (salt->H[cur_hash_idx].pSig->max_len) { int len = salt->H[cur_hash_idx].pSig->max_len + 5; if (len > salt->H[cur_hash_idx].datlen-12) len = salt->H[cur_hash_idx].datlen-12; SigChecked = 1; for (; e < len;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(&curDecryBuf[5], len-5)) goto Failed_Bailout; } else { if (!CheckSigs(&curDecryBuf[5], len-5, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif } else { // Ok, now we have handled inflate code type 3 and inflate code 0 (50% of 'random' data) // We now have the 2 'hard' ones left (fixed table, and variable table) curDecryBuf[0] = C; if ((C&6) == 4) { // inflate 'code' 2 (variable table) #if (ZIP_DEBUG==2) static unsigned count, found; ++count; #endif // we need 4 bytes, + 2, + 4 at most. 
for (; e < 10;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (!check_inflate_CODE2(curDecryBuf)) goto Failed_Bailout; #if (ZIP_DEBUG==2) fprintf (stderr, "CODE2 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found); #endif } else { int til; #if (ZIP_DEBUG==2) static unsigned count, found; ++count; #endif til = 36; if (salt->H[cur_hash_idx].datlen-12 < til) til = salt->H[cur_hash_idx].datlen-12; for (; e < til;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } if (!check_inflate_CODE1(curDecryBuf, til)) goto Failed_Bailout; #if (ZIP_DEBUG==2) fprintf (stderr, "CODE1 Pass=%s count = %u, found = %u\n", saved_key[idx], count, ++found); #endif } } #if USE_PKZIP_MAGIC // Ok, now see if we need to check sigs, or do a FULL inflate/crc check. if (!SigChecked && salt->H[cur_hash_idx].pSig->max_len) { int til = 180; if (salt->H[cur_hash_idx].datlen-12 < til) til = salt->H[cur_hash_idx].datlen-12; for (; e < til;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = til; ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */ if (ret != Z_OK) perror("Error, initializing the libz inflateInit2() system\n"); strm.next_in = curDecryBuf; strm.avail_out = sizeof(curInfBuf); strm.next_out = curInfBuf; ret = inflate(&strm, Z_SYNC_FLUSH); inflateEnd(&strm); if (ret != Z_OK) { // we need to handle zips smaller than sizeof curInfBuf. If we find a zip of this // size, the return is Z_STREAM_END, BUT things are fine. 
if (ret == Z_STREAM_END && salt->deCompLen == strm.total_out) ; // things are ok. else goto Failed_Bailout; } if (!strm.total_out) goto Failed_Bailout; ret = salt->H[cur_hash_idx].pSig->max_len; if (salt->H[cur_hash_idx].magic == 255) { if (!validate_ascii(curInfBuf, strm.total_out)) goto Failed_Bailout; } else { if (strm.total_out < ret) goto Failed_Bailout; if (!CheckSigs(curInfBuf, strm.total_out, salt->H[cur_hash_idx].pSig)) goto Failed_Bailout; } } #endif if (salt->H[cur_hash_idx].full_zip) { u8 inflateBufTmp[1024]; if (salt->compLen > 240 && salt->H[cur_hash_idx].datlen >= 200) { for (;e < 200;) { key0.u = jtr_crc32 (key0.u, curDecryBuf[e]); key1.u = (key1.u + key0.c[KB1]) * 134775813 + 1; key2.u = jtr_crc32 (key2.u, key1.c[KB2]); curDecryBuf[++e] = PKZ_MULT(*b++,key2); } strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.next_in = Z_NULL; strm.avail_in = e; ret = inflateInit2(&strm, -15); /* 'raw', since we do not have gzip header, or gzip crc. .ZIP files are 'raw' implode data. */ if (ret != Z_OK) perror("Error, initializing the libz inflateInit2() system\n"); strm.next_in = curDecryBuf; strm.avail_out = sizeof(inflateBufTmp); strm.next_out = inflateBufTmp; ret = inflate(&strm, Z_SYNC_FLUSH); inflateEnd(&strm); if (ret != Z_OK) { #if (ZIP_DEBUG==2) fprintf(stderr, "fail=%d fail2=%d tot="LLd"\n", ++FAILED, FAILED2, ((long long)CNT)*_count); #endif goto Failed_Bailout; } } goto KnownSuccess; } } while(--cur_hash_count); /* We got a checksum HIT!!!! All hash checksums matched. */ /* We load the proper checksum value for the gethash */ KnownSuccess: ; chk[idx] = 1; continue; Failed_Bailout: ; /* We load the wrong checksum value for the gethash */ chk[idx] = 0; } /* clear the 'dirty' flag. Then on multiple different salt calls, we will not have to */ /* encrypt the passwords again. They will have already been loaded in the K12[] array. 
*/ dirty = 0; return _count; } struct fmt_main fmt_pkzip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT, { NULL }, tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, fmt_default_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_dyna_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_LIBZ */
convolution_3x3_pack8to1_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Pre-transforms the raw 3x3 int8 convolution kernels into the 6x6 Winograd
// F(4x4,3x3) domain and interleaves them for the pack8-to-pack1 dot stage.
//
// kernel             : raw weights laid out as outch x inch x 9 signed bytes
// kernel_tm_pack8to1 : created here; groups of 8 output channels are packed into
//                      one destination channel, leftover output channels get one
//                      destination channel each (see the two interleave loops)
// inch / outch       : input / output channel counts. The interleave loops below
//                      iterate q in steps of 8 only while q + 7 < inch, so input
//                      channels are assumed to be a multiple of 8 -- TODO confirm
//                      with callers (pack8 input implies this).
static void conv3x3s1_winograd42_transform_kernel_pack8to1_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    // 36 transformed coefficients per (inch, outch) pair, 2 bytes (short) each.
    Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);

    // 6x3 kernel-transform matrix; applied on both sides below to lift each
    // 3x3 kernel g into the 6x6 Winograd domain (U = ktm * g * ktm^T form).
    const short ktm[6][3] = {
        {6, 0, 0},
        {-4, -4, -4},
        {-4, 4, -4},
        {1, 2, 4},
        {1, -2, 4},
        {0, 0, 6}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
            short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);

            // transform kernel
            // k0/k1/k2 are the three rows of the 3x3 kernel.
            const signed char* k0 = kernel0;
            const signed char* k1 = kernel0 + 3;
            const signed char* k2 = kernel0 + 6;

            // h
            // First pass: tmp[i][r] = ktm row i dotted with kernel row r.
            short tmp[6][3];
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // U
            // Second pass: apply the transform on the other side, producing the
            // 6x6 coefficient block stored row-major at kernel_tm0[j * 6 + i].
            for (int j = 0; j < 6; j++)
            {
                short* tmpp = &tmp[j][0];

                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 8a-inch/8a-36-outch
    // Destination channel layout: one channel per full group of 8 output
    // channels, followed by one channel per leftover output channel.
    kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 8 + outch % 8, (size_t)2u * 8, 8);

    int p = 0;
    for (; p + 7 < outch; p += 8)
    {
        const Mat k0 = kernel_tm.channel(p);
        const Mat k1 = kernel_tm.channel(p + 1);
        const Mat k2 = kernel_tm.channel(p + 2);
        const Mat k3 = kernel_tm.channel(p + 3);
        const Mat k4 = kernel_tm.channel(p + 4);
        const Mat k5 = kernel_tm.channel(p + 5);
        const Mat k6 = kernel_tm.channel(p + 6);
        const Mat k7 = kernel_tm.channel(p + 7);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    // 8 consecutive shorts = coefficient k from each of the 8
                    // output channels, for input channel q + i.
                    g00[0] = k0.row<const short>(q + i)[k];
                    g00[1] = k1.row<const short>(q + i)[k];
                    g00[2] = k2.row<const short>(q + i)[k];
                    g00[3] = k3.row<const short>(q + i)[k];
                    g00[4] = k4.row<const short>(q + i)[k];
                    g00[5] = k5.row<const short>(q + i)[k];
                    g00[6] = k6.row<const short>(q + i)[k];
                    g00[7] = k7.row<const short>(q + i)[k];

                    g00 += 8;
                }
            }
        }
    }
    // Leftover output channels: p / 8 full groups precede them, p % 8 indexes
    // the remainder slots; each remainder channel stores its coefficients flat.
    for (; p < outch; p++)
    {
        const Mat k0 = kernel_tm.channel(p);

        Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);

        for (int k = 0; k < 36; k++)
        {
            short* g00 = g0.row<short>(k);

            for (int q = 0; q + 7 < inch; q += 8)
            {
                for (int i = 0; i < 8; i++)
                {
                    g00[0] = k0.row<const short>(q + i)[k];

                    g00 += 1;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) int8 convolution, stride 1: input blob is pack8
// (elempack 8), output is pack1. Stages: pad input -> transform input tiles
// -> channel-wise dot in the transform domain -> inverse output transform
// (the stages continue below this block).
static void conv3x3s1_winograd42_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    // size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2
    // Output dims are rounded up to multiples of 4 (one 4x4 tile each), and the
    // input needs 2 extra border pixels so every 6x6 input tile is in-bounds.
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator); // const float itm[4][4] = { // {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f}, // {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f}, // {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f}, // {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f}, // {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f} // }; // 0 = 4 * r00 - 5 * r02 + r04 // 1 = -4 * (r01 + r02) + r04 + r03 // 2 = 4 * (r01 - r02) + r04 - r03 // 3 = -2 * (r01 - r03) + r04 - r02 // 4 = 2 * (r01 - r03) + r04 - r02 // 5 = 4 * r01 - 5 * r03 + r05 #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < inch; q++) { const Mat img0 = bottom_blob_bordered.channel(q); Mat img0_tm = bottom_blob_tm.channel(q); short tmp[6][6][8]; // tile for (int i = 0; i < h_tm / 6; i++) { for (int j = 0; j < w_tm / 6; j++) { const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8; for (int m = 0; m < 6; m++) { int8x8_t _r00 = vld1_s8(r0); int8x8_t _r01 = vld1_s8(r0 + 8); int8x8_t _r02 = vld1_s8(r0 + 16); int8x8_t _r03 = vld1_s8(r0 + 24); int8x8_t _r04 = vld1_s8(r0 + 32); int8x8_t _r05 = vld1_s8(r0 + 40); int8x8_t _v4s8 = vdup_n_s8(4); int8x8_t _v5s8 = vdup_n_s8(5); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); // int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f); int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8)); // int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f); int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4); // int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f); int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4); // int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, 
_r02), vsubq_f16(_r01, _r03), 2.f); int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2); // int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f); int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8)); vst1q_s16(tmp[0][m], _tmp0m); vst1q_s16(tmp[1][m], _tmp1m); vst1q_s16(tmp[2][m], _tmp2m); vst1q_s16(tmp[3][m], _tmp3m); vst1q_s16(tmp[4][m], _tmp4m); vst1q_s16(tmp[5][m], _tmp5m); r0 += w * 8; } short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8; short* r0_tm_1 = r0_tm_0 + tiles * 8; short* r0_tm_2 = r0_tm_0 + tiles * 16; short* r0_tm_3 = r0_tm_0 + tiles * 24; short* r0_tm_4 = r0_tm_0 + tiles * 32; short* r0_tm_5 = r0_tm_0 + tiles * 40; for (int m = 0; m < 6; m++) { int16x8_t _tmp00 = vld1q_s16(tmp[m][0]); int16x8_t _tmp01 = vld1q_s16(tmp[m][1]); int16x8_t _tmp02 = vld1q_s16(tmp[m][2]); int16x8_t _tmp03 = vld1q_s16(tmp[m][3]); int16x8_t _tmp04 = vld1q_s16(tmp[m][4]); int16x8_t _tmp05 = vld1q_s16(tmp[m][5]); int16x8_t _v2 = vdupq_n_s16(2); int16x8_t _v4 = vdupq_n_s16(4); int16x8_t _v5 = vdupq_n_s16(5); int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5); int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4); int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2); int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5); vst1q_s16(r0_tm_0, _r0tm0); vst1q_s16(r0_tm_1, _r0tm1); vst1q_s16(r0_tm_2, _r0tm2); vst1q_s16(r0_tm_3, _r0tm3); vst1q_s16(r0_tm_4, _r0tm4); vst1q_s16(r0_tm_5, _r0tm5); r0_tm_0 += tiles * 48; r0_tm_1 += tiles * 48; r0_tm_2 += tiles * 48; r0_tm_3 += tiles * 48; r0_tm_4 += tiles * 48; r0_tm_5 += tiles * 48; } } } } } bottom_blob_bordered = Mat(); // END transform input // BEGIN dot Mat 
top_blob_tm; { int w_tm = outw / 4 * 6; int h_tm = outh / 4 * 6; const int tiles = h_tm / 6 * w_tm / 6; // permute // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator); Mat bottom_blob_tm2; #if __aarch64__ if (tiles >= 8) bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #else if (tiles >= 4) bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator); else // if (tiles >= 1) bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator); #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int r = 0; r < 36; r++) { Mat tm2 = bottom_blob_tm2.channel(r); // tile int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { short* tm2p = tm2.row<short>(i / 8); const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); r0 += bottom_blob_tm.cstep * 8; } } #endif for (; i + 3 < tiles; 
i += 4) { #if __aarch64__ short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4); #else short* tm2p = tm2.row<short>(i / 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { // transpose 8x4 #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vswp d1, d2 \n" "vswp d5, d6 \n" "vswp q1, q2 \n" "vst4.s16 {d0-d3}, [%1 :64]! \n" "vst4.s16 {d4-d7}, [%1 :64]! \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 8; } } for (; i < tiles; i++) { #if __aarch64__ short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4 + i % 4); #else short* tm2p = tm2.row<short>(i / 4 + i % 4); #endif const short* r0 = bottom_blob_tm; r0 += (r * tiles + i) * 8; for (int q = 0; q < inch; q++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.s16 {d0-d1}, [%0 :64] \n" "vst1.s16 {d0-d1}, [%1 :64]! 
\n" : "=r"(r0), // %0 "=r"(tm2p) // %1 : "0"(r0), "1"(tm2p) : "memory", "q0"); #endif // __aarch64__ r0 += bottom_blob_tm.cstep * 8; } } } bottom_blob_tm = Mat(); // permute end top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator); int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 8; int* output0_tm = top_blob_tm.channel(p); int* output1_tm = top_blob_tm.channel(p + 1); int* output2_tm = top_blob_tm.channel(p + 2); int* output3_tm = top_blob_tm.channel(p + 3); int* output4_tm = top_blob_tm.channel(p + 4); int* output5_tm = top_blob_tm.channel(p + 5); int* output6_tm = top_blob_tm.channel(p + 6); int* output7_tm = top_blob_tm.channel(p + 7); const Mat kernel01_tm = kernel_tm.channel(p / 8); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 8); const short* kptr = kernel01_tm.row<const short>(r); int nn = inch; // inch always > 0 asm volatile( "eor v16.16b, v16.16b, v16.16b \n" "eor v17.16b, v17.16b, v17.16b \n" "eor v18.16b, v18.16b, v18.16b \n" "eor v19.16b, v19.16b, v19.16b \n" "eor v20.16b, v20.16b, v20.16b \n" "eor v21.16b, v21.16b, v21.16b \n" "eor v22.16b, v22.16b, v22.16b \n" "eor v23.16b, v23.16b, v23.16b \n" "eor v24.16b, v24.16b, v24.16b \n" "eor v25.16b, v25.16b, v25.16b \n" "eor v26.16b, v26.16b, v26.16b \n" "eor v27.16b, v27.16b, v27.16b \n" "eor v28.16b, v28.16b, v28.16b \n" "eor v29.16b, v29.16b, v29.16b \n" "eor v30.16b, v30.16b, v30.16b \n" "eor v31.16b, v31.16b, v31.16b \n" "0: \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%9], #64 \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n" "smlal v16.4s, v8.4h, v0.h[0] \n" "smlal2 v17.4s, v8.8h, v0.h[0] \n" "smlal v18.4s, v8.4h, v0.h[1] \n" "smlal2 v19.4s, v8.8h, v0.h[1] \n" 
"smlal v20.4s, v8.4h, v0.h[2] \n" "smlal2 v21.4s, v8.8h, v0.h[2] \n" "smlal v22.4s, v8.4h, v0.h[3] \n" "smlal2 v23.4s, v8.8h, v0.h[3] \n" "smlal v24.4s, v8.4h, v0.h[4] \n" "smlal2 v25.4s, v8.8h, v0.h[4] \n" "smlal v26.4s, v8.4h, v0.h[5] \n" "smlal2 v27.4s, v8.8h, v0.h[5] \n" "smlal v28.4s, v8.4h, v0.h[6] \n" "smlal2 v29.4s, v8.8h, v0.h[6] \n" "smlal v30.4s, v8.4h, v0.h[7] \n" "smlal2 v31.4s, v8.8h, v0.h[7] \n" "smlal v16.4s, v9.4h, v1.h[0] \n" "smlal2 v17.4s, v9.8h, v1.h[0] \n" "smlal v18.4s, v9.4h, v1.h[1] \n" "smlal2 v19.4s, v9.8h, v1.h[1] \n" "smlal v20.4s, v9.4h, v1.h[2] \n" "smlal2 v21.4s, v9.8h, v1.h[2] \n" "smlal v22.4s, v9.4h, v1.h[3] \n" "smlal2 v23.4s, v9.8h, v1.h[3] \n" "smlal v24.4s, v9.4h, v1.h[4] \n" "smlal2 v25.4s, v9.8h, v1.h[4] \n" "smlal v26.4s, v9.4h, v1.h[5] \n" "smlal2 v27.4s, v9.8h, v1.h[5] \n" "smlal v28.4s, v9.4h, v1.h[6] \n" "smlal2 v29.4s, v9.8h, v1.h[6] \n" "smlal v30.4s, v9.4h, v1.h[7] \n" "smlal2 v31.4s, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%9, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%9], #64 \n" "smlal v16.4s, v10.4h, v2.h[0] \n" "smlal2 v17.4s, v10.8h, v2.h[0] \n" "smlal v18.4s, v10.4h, v2.h[1] \n" "smlal2 v19.4s, v10.8h, v2.h[1] \n" "smlal v20.4s, v10.4h, v2.h[2] \n" "smlal2 v21.4s, v10.8h, v2.h[2] \n" "smlal v22.4s, v10.4h, v2.h[3] \n" "smlal2 v23.4s, v10.8h, v2.h[3] \n" "smlal v24.4s, v10.4h, v2.h[4] \n" "smlal2 v25.4s, v10.8h, v2.h[4] \n" "smlal v26.4s, v10.4h, v2.h[5] \n" "smlal2 v27.4s, v10.8h, v2.h[5] \n" "smlal v28.4s, v10.4h, v2.h[6] \n" "smlal2 v29.4s, v10.8h, v2.h[6] \n" "smlal v30.4s, v10.4h, v2.h[7] \n" "smlal2 v31.4s, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%10, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n" "smlal v16.4s, v11.4h, v3.h[0] \n" "smlal2 v17.4s, v11.8h, v3.h[0] \n" "smlal v18.4s, v11.4h, v3.h[1] \n" "smlal2 v19.4s, v11.8h, v3.h[1] \n" "smlal v20.4s, v11.4h, v3.h[2] \n" "smlal2 v21.4s, v11.8h, v3.h[2] \n" "smlal v22.4s, v11.4h, v3.h[3] \n" "smlal2 v23.4s, v11.8h, v3.h[3] \n" "smlal 
v24.4s, v11.4h, v3.h[4] \n" "smlal2 v25.4s, v11.8h, v3.h[4] \n" "smlal v26.4s, v11.4h, v3.h[5] \n" "smlal2 v27.4s, v11.8h, v3.h[5] \n" "smlal v28.4s, v11.4h, v3.h[6] \n" "smlal2 v29.4s, v11.8h, v3.h[6] \n" "smlal v30.4s, v11.4h, v3.h[7] \n" "smlal2 v31.4s, v11.8h, v3.h[7] \n" "smlal v16.4s, v12.4h, v4.h[0] \n" "smlal2 v17.4s, v12.8h, v4.h[0] \n" "smlal v18.4s, v12.4h, v4.h[1] \n" "smlal2 v19.4s, v12.8h, v4.h[1] \n" "smlal v20.4s, v12.4h, v4.h[2] \n" "smlal2 v21.4s, v12.8h, v4.h[2] \n" "smlal v22.4s, v12.4h, v4.h[3] \n" "smlal2 v23.4s, v12.8h, v4.h[3] \n" "smlal v24.4s, v12.4h, v4.h[4] \n" "smlal2 v25.4s, v12.8h, v4.h[4] \n" "smlal v26.4s, v12.4h, v4.h[5] \n" "smlal2 v27.4s, v12.8h, v4.h[5] \n" "smlal v28.4s, v12.4h, v4.h[6] \n" "smlal2 v29.4s, v12.8h, v4.h[6] \n" "smlal v30.4s, v12.4h, v4.h[7] \n" "smlal2 v31.4s, v12.8h, v4.h[7] \n" "smlal v16.4s, v13.4h, v5.h[0] \n" "smlal2 v17.4s, v13.8h, v5.h[0] \n" "smlal v18.4s, v13.4h, v5.h[1] \n" "smlal2 v19.4s, v13.8h, v5.h[1] \n" "smlal v20.4s, v13.4h, v5.h[2] \n" "smlal2 v21.4s, v13.8h, v5.h[2] \n" "smlal v22.4s, v13.4h, v5.h[3] \n" "smlal2 v23.4s, v13.8h, v5.h[3] \n" "smlal v24.4s, v13.4h, v5.h[4] \n" "smlal2 v25.4s, v13.8h, v5.h[4] \n" "smlal v26.4s, v13.4h, v5.h[5] \n" "smlal2 v27.4s, v13.8h, v5.h[5] \n" "smlal v28.4s, v13.4h, v5.h[6] \n" "smlal2 v29.4s, v13.8h, v5.h[6] \n" "smlal v30.4s, v13.4h, v5.h[7] \n" "smlal2 v31.4s, v13.8h, v5.h[7] \n" "smlal v16.4s, v14.4h, v6.h[0] \n" "smlal2 v17.4s, v14.8h, v6.h[0] \n" "smlal v18.4s, v14.4h, v6.h[1] \n" "smlal2 v19.4s, v14.8h, v6.h[1] \n" "smlal v20.4s, v14.4h, v6.h[2] \n" "smlal2 v21.4s, v14.8h, v6.h[2] \n" "smlal v22.4s, v14.4h, v6.h[3] \n" "smlal2 v23.4s, v14.8h, v6.h[3] \n" "smlal v24.4s, v14.4h, v6.h[4] \n" "smlal2 v25.4s, v14.8h, v6.h[4] \n" "smlal v26.4s, v14.4h, v6.h[5] \n" "smlal2 v27.4s, v14.8h, v6.h[5] \n" "smlal v28.4s, v14.4h, v6.h[6] \n" "smlal2 v29.4s, v14.8h, v6.h[6] \n" "smlal v30.4s, v14.4h, v6.h[7] \n" "smlal2 v31.4s, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 
\n" "smlal v16.4s, v15.4h, v7.h[0] \n" "smlal2 v17.4s, v15.8h, v7.h[0] \n" "smlal v18.4s, v15.4h, v7.h[1] \n" "smlal2 v19.4s, v15.8h, v7.h[1] \n" "smlal v20.4s, v15.4h, v7.h[2] \n" "smlal2 v21.4s, v15.8h, v7.h[2] \n" "smlal v22.4s, v15.4h, v7.h[3] \n" "smlal2 v23.4s, v15.8h, v7.h[3] \n" "smlal v24.4s, v15.4h, v7.h[4] \n" "smlal2 v25.4s, v15.8h, v7.h[4] \n" "smlal v26.4s, v15.4h, v7.h[5] \n" "smlal2 v27.4s, v15.8h, v7.h[5] \n" "smlal v28.4s, v15.4h, v7.h[6] \n" "smlal2 v29.4s, v15.8h, v7.h[6] \n" "smlal v30.4s, v15.4h, v7.h[7] \n" "smlal2 v31.4s, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" "st1 {v20.4s, v21.4s}, [%3], #32 \n" "st1 {v22.4s, v23.4s}, [%4], #32 \n" "st1 {v24.4s, v25.4s}, [%5], #32 \n" "st1 {v26.4s, v27.4s}, [%6], #32 \n" "st1 {v28.4s, v29.4s}, [%7], #32 \n" "st1 {v30.4s, v31.4s}, [%8], #32 \n" : "=r"(nn), // %0 "=r"(output0_tm), // %1 "=r"(output1_tm), // %2 "=r"(output2_tm), // %3 "=r"(output3_tm), // %4 "=r"(output4_tm), // %5 "=r"(output5_tm), // %6 "=r"(output6_tm), // %7 "=r"(output7_tm), // %8 "=r"(r0), // %9 "=r"(kptr) // %10 : "0"(nn), "1"(output0_tm), "2"(output1_tm), "3"(output2_tm), "4"(output3_tm), "5"(output4_tm), "6"(output5_tm), "7"(output6_tm), "8"(output7_tm), "9"(r0), "10"(kptr) : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } #endif for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* k0 = kernel01_tm.row<const short>(r); int nn = inch; // inch always > 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); int32x4_t _sum4 = vdupq_n_s32(0); int32x4_t _sum5 = vdupq_n_s32(0); 
int32x4_t _sum6 = vdupq_n_s32(0); int32x4_t _sum7 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _val1 = vld1q_s16(r0 + 8); int16x8_t _val2 = vld1q_s16(r0 + 16); int16x8_t _val3 = vld1q_s16(r0 + 24); int16x8_t _w0 = vld1q_s16(k0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val0), vget_low_s16(_w0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val0), vget_low_s16(_w0), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val0), vget_low_s16(_w0), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val0), vget_low_s16(_w0), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val0), vget_high_s16(_w0), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val0), vget_high_s16(_w0), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val0), vget_high_s16(_w0), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val0), vget_high_s16(_w0), 3); int16x8_t _w1 = vld1q_s16(k0 + 8); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val0), vget_low_s16(_w1), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val0), vget_low_s16(_w1), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val0), vget_low_s16(_w1), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val0), vget_low_s16(_w1), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val0), vget_high_s16(_w1), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val0), vget_high_s16(_w1), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val0), vget_high_s16(_w1), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val0), vget_high_s16(_w1), 3); int16x8_t _w2 = vld1q_s16(k0 + 16); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val1), vget_low_s16(_w2), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val1), vget_low_s16(_w2), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val1), vget_low_s16(_w2), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val1), vget_low_s16(_w2), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val1), vget_high_s16(_w2), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val1), vget_high_s16(_w2), 1); _sum6 
= vmlal_lane_s16(_sum6, vget_low_s16(_val1), vget_high_s16(_w2), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val1), vget_high_s16(_w2), 3); int16x8_t _w3 = vld1q_s16(k0 + 24); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val1), vget_low_s16(_w3), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val1), vget_low_s16(_w3), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val1), vget_low_s16(_w3), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val1), vget_low_s16(_w3), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val1), vget_high_s16(_w3), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val1), vget_high_s16(_w3), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val1), vget_high_s16(_w3), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val1), vget_high_s16(_w3), 3); int16x8_t _w4 = vld1q_s16(k0 + 32); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val2), vget_low_s16(_w4), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val2), vget_low_s16(_w4), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val2), vget_low_s16(_w4), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val2), vget_low_s16(_w4), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val2), vget_high_s16(_w4), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val2), vget_high_s16(_w4), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val2), vget_high_s16(_w4), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val2), vget_high_s16(_w4), 3); int16x8_t _w5 = vld1q_s16(k0 + 40); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val2), vget_low_s16(_w5), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val2), vget_low_s16(_w5), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val2), vget_low_s16(_w5), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val2), vget_low_s16(_w5), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val2), vget_high_s16(_w5), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val2), vget_high_s16(_w5), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val2), vget_high_s16(_w5), 2); _sum7 = 
vmlal_lane_s16(_sum7, vget_high_s16(_val2), vget_high_s16(_w5), 3); int16x8_t _w6 = vld1q_s16(k0 + 48); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val3), vget_low_s16(_w6), 0); _sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val3), vget_low_s16(_w6), 1); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val3), vget_low_s16(_w6), 2); _sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val3), vget_low_s16(_w6), 3); _sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val3), vget_high_s16(_w6), 0); _sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val3), vget_high_s16(_w6), 1); _sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val3), vget_high_s16(_w6), 2); _sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val3), vget_high_s16(_w6), 3); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val3), vget_low_s16(_w7), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val3), vget_low_s16(_w7), 1); _sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val3), vget_low_s16(_w7), 2); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val3), vget_low_s16(_w7), 3); _sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val3), vget_high_s16(_w7), 0); _sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val3), vget_high_s16(_w7), 1); _sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val3), vget_high_s16(_w7), 2); _sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val3), vget_high_s16(_w7), 3); r0 += 32; k0 += 64; } vst1q_s32(output0_tm, _sum0); vst1q_s32(output1_tm, _sum1); vst1q_s32(output2_tm, _sum2); vst1q_s32(output3_tm, _sum3); vst1q_s32(output4_tm, _sum4); vst1q_s32(output5_tm, _sum5); vst1q_s32(output6_tm, _sum6); vst1q_s32(output7_tm, _sum7); output0_tm += 4; output1_tm += 4; output2_tm += 4; output3_tm += 4; output4_tm += 4; output5_tm += 4; output6_tm += 4; output7_tm += 4; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4); #else const short* r0 = bb2.row<const short>(i / 4 + i % 4); #endif const short* k0 = kernel01_tm.row<const short>(r); int nn = inch; // inch always 
> 0 int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int j = 0; j < nn; j++) { int16x8_t _val0 = vld1q_s16(r0); int16x8_t _w0 = vld1q_s16(k0); int16x8_t _w1 = vld1q_s16(k0 + 8); int16x8_t _w2 = vld1q_s16(k0 + 16); int16x8_t _w3 = vld1q_s16(k0 + 24); int16x8_t _w4 = vld1q_s16(k0 + 32); int16x8_t _w5 = vld1q_s16(k0 + 40); int16x8_t _w6 = vld1q_s16(k0 + 48); int16x8_t _w7 = vld1q_s16(k0 + 56); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3); r0 += 8; k0 += 64; } output0_tm[0] = vgetq_lane_s32(_sum0, 0); output1_tm[0] = vgetq_lane_s32(_sum0, 1); output2_tm[0] = vgetq_lane_s32(_sum0, 2); output3_tm[0] = vgetq_lane_s32(_sum0, 3); output4_tm[0] = vgetq_lane_s32(_sum1, 0); output5_tm[0] = vgetq_lane_s32(_sum1, 1); output6_tm[0] = vgetq_lane_s32(_sum1, 2); output7_tm[0] = vgetq_lane_s32(_sum1, 3); output0_tm += 1; 
output1_tm += 1; output2_tm += 1; output3_tm += 1; output4_tm += 1; output5_tm += 1; output6_tm += 1; output7_tm += 1; } } } remain_outch_start += nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { int* output0_tm = top_blob_tm.channel(p); const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8); for (int r = 0; r < 36; r++) { const Mat bb2 = bottom_blob_tm2.channel(r); int i = 0; #if __aarch64__ for (; i + 7 < tiles; i += 8) { const short* r0 = bb2.row<const short>(i / 8); const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); int32x4_t _sum2 = vdupq_n_s32(0); int32x4_t _sum3 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); int16x8_t _r1 = vld1q_s16(r0 + 8); int16x8_t _r2 = vld1q_s16(r0 + 16); int16x8_t _r3 = vld1q_s16(r0 + 24); int16x8_t _r4 = vld1q_s16(r0 + 32); int16x8_t _r5 = vld1q_s16(r0 + 40); int16x8_t _r6 = vld1q_s16(r0 + 48); int16x8_t _r7 = vld1q_s16(r0 + 56); int16x8_t _k0 = vld1q_s16(kptr); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r1), vget_low_s16(_k0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r1), vget_low_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_low_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_low_s16(_k0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r3), vget_low_s16(_k0), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r3), vget_low_s16(_k0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r4), vget_high_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r4), vget_high_s16(_k0), 0); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r5), vget_high_s16(_k0), 1); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r5), vget_high_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, 
vget_low_s16(_r6), vget_high_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r6), vget_high_s16(_k0), 2); _sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r7), vget_high_s16(_k0), 3); _sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r7), vget_high_s16(_k0), 3); kptr += 8; r0 += 64; } _sum0 = vaddq_s32(_sum0, _sum2); _sum1 = vaddq_s32(_sum1, _sum3); vst1q_s32(output0_tm, _sum0); vst1q_s32(output0_tm + 4, _sum1); output0_tm += 8; } #endif for (; i + 3 < tiles; i += 4) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4); #else const short* r0 = bb2.row<const short>(i / 4); #endif const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); int16x8_t _r1 = vld1q_s16(r0 + 8); int16x8_t _r2 = vld1q_s16(r0 + 16); int16x8_t _r3 = vld1q_s16(r0 + 24); int16x8_t _k0 = vld1q_s16(kptr); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1), vget_low_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r1), vget_low_s16(_k0), 3); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_high_s16(_k0), 0); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_high_s16(_k0), 1); _sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r3), vget_high_s16(_k0), 2); _sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r3), vget_high_s16(_k0), 3); kptr += 8; r0 += 32; } int32x4_t _sum01 = vaddq_s32(_sum0, _sum1); vst1q_s32(output0_tm, _sum01); output0_tm += 4; } for (; i < tiles; i++) { #if __aarch64__ const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4); #else const short* r0 = bb2.row<const short>(i / 4 + i % 4); #endif const short* kptr = kernel0_tm.row<const short>(r); int32x4_t _sum0 = vdupq_n_s32(0); int32x4_t _sum1 = vdupq_n_s32(0); for (int q = 0; q < inch; q++) { int16x8_t _r0 = vld1q_s16(r0); 
int16x8_t _k0 = vld1q_s16(kptr);

                    // Multiply-accumulate the 8 input channels for this tile
                    // against the 8 kernel coefficients; low/high halves are
                    // accumulated separately and reduced after the loop.
                    _sum0 = vmlal_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0));
                    _sum1 = vmlal_s16(_sum1, vget_high_s16(_r0), vget_high_s16(_k0));

                    kptr += 8;
                    r0 += 8;
                }

                // Horizontal reduction of the two partial vectors to one scalar.
                int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
                int sum = vaddvq_s32(_sum); // dot
#else
                int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
                _ss = vpadd_s32(_ss, _ss);
                int sum = vget_lane_s32(_ss, 0);
#endif

                output0_tm[0] = sum;
                output0_tm++;
            }
        }
    }
    }
    bottom_blob_tm = Mat();
    // END dot

    // BEGIN transform output
    // Inverse Winograd transform: each 6x6 block of dot results is folded back
    // into a 4x4 spatial output tile.
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
    }
    {
        // const float otm[4][6] = {
        //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
        //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
        //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
        // };

        // 0 = r00 + (r01 + r02) + (r03 + r04)
        // 1 = (r01 - r02) + (r03 - r04) * 2
        // 2 = (r01 + r02) + (r03 + r04) * 4
        // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = w_tm / 6 * h_tm / 6;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            const Mat out0_tm = top_blob_tm.channel(p);
            Mat out0 = top_blob_bordered.channel(p);

            int tmp[4][6];

            // tile
            for (int i = 0; i < outh / 4; i++)
            {
                for (int j = 0; j < outw / 4; j++)
                {
                    // top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);

                    // Six row pointers into the 36 transform positions of this
                    // tile; rows are spaced `tiles` ints apart.
                    const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
                    const int* output0_tm_1 = output0_tm_0 + tiles * 1;
                    const int* output0_tm_2 = output0_tm_0 + tiles * 2;
                    const int* output0_tm_3 = output0_tm_0 + tiles * 3;
                    const int* output0_tm_4 = output0_tm_0 + tiles * 4;
                    const int* output0_tm_5 = output0_tm_0 + tiles * 5;

                    int* output0 = out0.row<int>(i * 4) + j * 4;

                    // 0 = r00 + (r01 + r02) + (r03 + r04)
                    // 1 = (r01 - r02) + (r03 - r04) * 2
                    // 2 = (r01 + r02) + (r03 + r04) * 4
                    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
                    // NOTE(review): unlike the float comment above, the int
                    // version multiplies r05 by 4 and scales column m == 5 by 4
                    // (loop below); presumably this keeps all terms on a common
                    // integer scale that the final / 576 removes -- verify
                    // against the paired input/kernel transform scalings.

                    // TODO neon optimize
                    for (int m = 0; m < 5; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
                        tmp[1][m] = tmp13a + tmp13b * 2;
                        tmp[2][m] = tmp02a + tmp02b * 4;
                        tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }
                    // Last column (m == 5) carries an extra factor of 4.
                    for (int m = 5; m < 6; m++)
                    {
                        int tmp02a = output0_tm_1[0] + output0_tm_2[0];
                        int tmp13a = output0_tm_1[0] - output0_tm_2[0];

                        int tmp02b = output0_tm_3[0] + output0_tm_4[0];
                        int tmp13b = output0_tm_3[0] - output0_tm_4[0];

                        tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
                        tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
                        tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
                        tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;

                        output0_tm_0 += tiles * 6;
                        output0_tm_1 += tiles * 6;
                        output0_tm_2 += tiles * 6;
                        output0_tm_3 += tiles * 6;
                        output0_tm_4 += tiles * 6;
                        output0_tm_5 += tiles * 6;
                    }

                    // Apply the same transform along the other axis and divide
                    // out the accumulated integer scale (576) to produce the
                    // final 4x4 output tile.
                    for (int m = 0; m < 4; m++)
                    {
                        const int* tmp0 = tmp[m];

                        int tmp02a = tmp0[1] + tmp0[2];
                        int tmp13a = tmp0[1] - tmp0[2];

                        int tmp02b = tmp0[3] + tmp0[4];
                        int tmp13b = tmp0[3] - tmp0[4];

                        output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
                        output0[1] = (tmp13a + tmp13b * 2) / 576;
                        output0[2] = (tmp02a + tmp02b * 4) / 576;
                        output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;

                        output0 += outw;
                    }
                }
            }
        }
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
solution.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <omp.h>

/*
 * Integrand: f(x) = sqrt(4 - x^2), a quarter circle of radius 2.
 * For x > 2 the radicand 4 - x*x is negative (sqrt would yield NaN),
 * so such points must contribute nothing to the integral.
 */
double Func(double x)
{
    if (x > 2) {
        return 0;
    }
    return sqrt(4 - x * x);
}

/*
 * Composite trapezoidal (Cotes) rule on the uniform grid x_i = i * h,
 * over the subinterval [left_index * h, right_index * h].
 * Endpoints are weighted 1/2, interior nodes 1; the sum is scaled by h.
 */
double Integral(size_t left_index, size_t right_index, double h)
{
    double I = (Func(right_index * h) + Func(left_index * h)) / 2;
    for (size_t i = left_index + 1; i < right_index; i++) {
        I += Func(i * h);
    }
    return I * h;
}

int main(int argc, char **argv)
{
    size_t N = 1000000;  /* number of grid steps */
    int size = 1;        /* requested number of threads */
    size_t numexp = 1;   /* repetitions, to average over several runs */

    if (argc > 1) {
        N = atoll(argv[1]);
        if (argc > 2) {
            size = atoi(argv[2]);
            if (argc > 3) {
                numexp = atoll(argv[3]);
            }
        }
    }

    /* Guard the divisions below: h = (b-a)/N, the per-thread chunk N/threads,
     * and the final average result/numexp all require nonzero operands. */
    if (N == 0 || size <= 0 || numexp == 0) {
        fprintf(stderr, "usage: %s [N>0] [threads>0] [numexp>0]\n", argv[0]);
        return EXIT_FAILURE;
    }

    /* Integration bounds and grid spacing. */
    double a = 0, b = 2;
    double h = (b - a) / N;

    double result = 0.0;
    for (size_t i = 0; i < numexp; i++) {
        /* Request the desired team size. */
        omp_set_num_threads(size);

        /* BUG FIX: partition the grid by the team size actually granted
         * (omp_get_num_threads), not the requested `size`.  The runtime may
         * deliver fewer threads than requested, in which case the old code
         * silently skipped the subintervals of the missing ranks and
         * produced a wrong integral.  A reduction replaces the former
         * critical section (same result, no serialization). */
        #pragma omp parallel reduction(+ : result)
        {
            int rank = omp_get_thread_num();
            int nthreads = omp_get_num_threads();
            size_t chunk = N / (size_t) nthreads;
            size_t left_index = (size_t) rank * chunk;
            /* The last rank absorbs the remainder of N / nthreads. */
            size_t right_index =
                (rank != nthreads - 1) ? left_index + chunk : N;
            result += Integral(left_index, right_index, h);
        }
    }

    /* Print the requested thread count and the averaged integral value. */
    printf("%d %lf\n", size, result / numexp);

    return EXIT_SUCCESS;
}
GB_binop__le_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__le_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_01__le_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_02__le_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_03__le_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__le_uint64)
// A*D function (colscale):       GB (_AxD__le_uint64)
// D*A function (rowscale):       GB (_DxB__le_uint64)
// C+=B function (dense accum):   GB (_Cdense_accumB__le_uint64)
// C+=b function (dense accum):   GB (_Cdense_accumb__le_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_uint64)
// C=scalar+B                     GB (_bind1st__le_uint64)
// C=scalar+B'                    GB (_bind1st_tran__le_uint64)
// C=A+scalar                     GB (_bind2nd__le_uint64)
// C=A'+scalar                    GB (_bind2nd_tran__le_uint64)

// C type:   bool
// A type:   uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x <= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// NOTE(review): when GB_DISABLE is true, every kernel below compiles to a
// stub returning GrB_NO_VALUE, and the generic fallback is used at runtime.
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_UINT64 || GxB_NO_LE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): LE is a comparator, not one of those accumulable ops, so the
// generator emits this variant compiled out (the "(none)" entry above).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): the accumulation template is compiled out (#if 0) —
    // presumably because C (bool) and B (uint64_t) differ in type here
    // (GB_CTYPE_IS_BTYPE is 0); the function is a stub reporting success.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__le_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): compiled out for the same reason as _Cdense_accumB above.
    #if 0
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__le_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__le_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__le_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__le_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__le_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap A
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x <= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__le_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij <= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__le_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__isge_uint64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__isge_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_08__isge_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_02__isge_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_04__isge_uint64)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__isge_uint64)
// A*D function (colscale):       GB (_AxD__isge_uint64)
// D*A function (rowscale):       GB (_DxB__isge_uint64)
// C+=B function (dense accum):   GB (_Cdense_accumB__isge_uint64)
// C+=b function (dense accum):   GB (_Cdense_accumb__isge_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint64)
// C=scalar+B                     GB (_bind1st__isge_uint64)
// C=scalar+B'                    GB (_bind1st_tran__isge_uint64)
// C=A+scalar                     GB (_bind2nd__isge_uint64)
// C=A'+scalar                    GB (_bind2nd_tran__isge_uint64)

// C type:     uint64_t
// A type:     uint64_t
// A pattern?  0
// B type:     uint64_t
// B pattern?  0

// BinaryOp: cij = (aij >= bij)

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x >= y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// NOTE(review): when GB_DISABLE is true, the kernels below compile to stubs
// returning GrB_NO_VALUE, and the generic fallback is used at runtime.
#define GB_DISABLE \
    (GxB_NO_ISGE || GxB_NO_UINT64 || GxB_NO_ISGE_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): ISGE is not one of those accumulable ops, so the generator
// emits this variant compiled out (the "(none)" entry above).
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__isge_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable; the generator emits this second return so
    // the function is well-formed regardless of the template's contents.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__isge_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint64_t alpha_scalar ;
    uint64_t beta_scalar ;
    // for eWiseUnion, alpha/beta replace missing entries of A and B
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__isge_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isge_uint64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t x = (*((uint64_t *) x_input)) ;
    uint64_t *Bx = (uint64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap B
        if (!GBB (Bb, p)) continue ;
        uint64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__isge_uint64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint64_t *Cx = (uint64_t *) Cx_output ;
    uint64_t *Ax = (uint64_t *) Ax_input ;
    uint64_t y = (*((uint64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap A
        if (!GBB (Ab, p)) continue ;
        uint64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x >= aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isge_uint64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t x = (*((const uint64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint64_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij >= y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isge_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t y = (*((const uint64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
THTensorMath.c
/* generic/THTensorMath.c: element-wise and indexing math kernels.
 * This file is compiled once per scalar type by the TH "generic" include
 * mechanism: `real`, `accreal` and the THTensor_() name macro are bound
 * to each concrete type before re-inclusion. */
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/THTensorMath.c"
#else

/* Some toolchains do not provide a NAN macro; nan(NULL) yields a quiet NaN. */
#ifndef NAN
#define NAN (nan(NULL))
#endif

#ifdef _OPENMP
#include <omp.h>
#endif

/* Element-count threshold below which OpenMP parallelisation does not pay
 * for the thread-management overhead. */
#define TH_OMP_OVERHEAD_THRESHOLD 100000

#ifdef _OPENMP

/* MSVC has no _Pragma operator; it spells the same thing __pragma. */
#ifndef _WIN32
#define PRAGMA(P) _Pragma(#P)
#else
#define PRAGMA(P) __pragma(P)
#endif

/* Run CODE over a contiguous TENSOR, splitting the flat element range
 * evenly across OpenMP threads (the last thread absorbs the remainder).
 * Inside CODE, TENSOR##_data / TENSOR##_len describe this thread's slice. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE *TENSOR##_data = THTensor_(data)(TENSOR) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
/* Serial fallback: a single slice covering the whole tensor. */
#define TH_TENSOR_APPLY_CONTIG(TYPE, TENSOR, CODE) \
{ \
  TYPE *TENSOR##_data = THTensor_(data)(TENSOR); \
  ptrdiff_t TENSOR##_len = THTensor_(nElement)(TENSOR); \
  CODE \
}
#endif

#ifdef _OPENMP
/* Two-tensor variant. NOTE(review): the slice length comes from TENSOR1
 * only; callers are expected to guarantee equal element counts. */
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
#define TH_TENSOR_APPLY2_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif

#ifdef _OPENMP
/* Three-tensor variant; same TENSOR1-derived slicing as above. */
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  ptrdiff_t TH_TENSOR_size = THTensor_(nElement)(TENSOR1); \
  PRAGMA(omp parallel if (TH_TENSOR_size > TH_OMP_OVERHEAD_THRESHOLD)) \
  { \
    size_t num_threads = omp_get_num_threads(); \
    size_t tid = omp_get_thread_num(); \
    ptrdiff_t TH_TENSOR_offset = tid * (TH_TENSOR_size / num_threads); \
    ptrdiff_t TH_TENSOR_end = tid == num_threads - 1 ? TH_TENSOR_size : \
      TH_TENSOR_offset + TH_TENSOR_size / num_threads; \
    ptrdiff_t TENSOR1##_len = TH_TENSOR_end - TH_TENSOR_offset; \
    TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1) + TH_TENSOR_offset; \
    TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2) + TH_TENSOR_offset; \
    TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3) + TH_TENSOR_offset; \
    CODE \
  } \
}
#else
#define TH_TENSOR_APPLY3_CONTIG(TYPE1, TENSOR1, TYPE2, TENSOR2, TYPE3, TENSOR3, CODE) \
{ \
  TYPE1 *TENSOR1##_data = THTensor_(data)(TENSOR1); \
  TYPE2 *TENSOR2##_data = THTensor_(data)(TENSOR2); \
  TYPE3 *TENSOR3##_data = THTensor_(data)(TENSOR3); \
  ptrdiff_t TENSOR1##_len = THTensor_(nElement)(TENSOR1); \
  CODE \
}
#endif

/* r_ := value for every element. Vectorised fast path for contiguous
 * (or transposed) layouts; otherwise a strided traversal that still
 * bulk-fills each unit-stride inner run. */
void THTensor_(fill)(THTensor *r_, real value)
{
  if (THTensor_(isContiguous)(r_) || THTensor_(isTransposed)(r_)) {
    TH_TENSOR_APPLY_CONTIG(real, r_, THVector_(fill)(r__data, value, r__len););
  } else {
    TH_TENSOR_APPLY(real, r_,
      if (r__stride == 1) {
        /* contiguous inner dimension: fill the whole run at once */
        THVector_(fill)(r__data, value, r__size);
        r__i = r__size;
        r__data +=
r__stride * r__size; break; } else { *r__data = value; } ); } } void THTensor_(zero)(THTensor *r_) { THTensor_(fill)(r_, 0); } void THTensor_(maskedFill)(THTensor *tensor, THByteTensor *mask, real value) { TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = value; }); } void THTensor_(maskedCopy)(THTensor *tensor, THByteTensor *mask, THTensor* src ) { THTensor *srct = THTensor_(newContiguous)(src); real *src_data = THTensor_(data)(srct); ptrdiff_t cntr = 0; ptrdiff_t nelem = THTensor_(nElement)(srct); if (THTensor_(nElement)(tensor) != THByteTensor_nElement(mask)) { THTensor_(free)(srct); THError("Number of elements of destination tensor != Number of elements in mask"); } TH_TENSOR_APPLY2(real, tensor, unsigned char, mask, if (*mask_data > 1) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { if (cntr == nelem) { THTensor_(free)(srct); THFree(mask_counter); THFree(tensor_counter); THError("Number of elements of src < number of ones in mask"); } *tensor_data = *src_data; src_data++; cntr++; }); THTensor_(free)(srct); } void THTensor_(maskedSelect)(THTensor *tensor, THTensor *src, THByteTensor *mask) { ptrdiff_t numel = THByteTensor_sumall(mask); real *tensor_data; #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif THTensor_(resize1d)(tensor,numel); tensor_data = THTensor_(data)(tensor); TH_TENSOR_APPLY2(real, src, unsigned char, mask, if (*mask_data > 1) { THFree(mask_counter); THFree(src_counter); THError("Mask tensor can take 0 and 1 values only"); } else if (*mask_data == 1) { *tensor_data = *src_data; tensor_data++; }); } // Finds non-zero elements of a tensor and returns their subscripts void THTensor_(nonzero)(THLongTensor *subscript, THTensor *tensor) { ptrdiff_t numel = 0; int64_t 
*subscript_data; int64_t i = 0; int64_t dim; int64_t div = 1; #ifdef TH_REAL_IS_HALF #define IS_NONZERO(val) ((val.x & 0x7fff) != 0) #else #define IS_NONZERO(val) ((val)!=0) #endif /* First Pass to determine size of subscripts */ TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { ++numel; }); #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif THLongTensor_resize2d(subscript, numel, tensor->nDimension); /* Second pass populates subscripts */ subscript_data = THLongTensor_data(subscript); TH_TENSOR_APPLY(real, tensor, if IS_NONZERO(*tensor_data) { div = 1; for (dim = tensor->nDimension - 1; dim >= 0; dim--) { *(subscript_data + dim) = (i/div) % tensor->size[dim]; div *= tensor->size[dim]; } subscript_data += tensor->nDimension; } ++i;); } void THTensor_(indexSelect)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { ptrdiff_t i, numel; THLongStorage *newSize; THTensor *tSlice, *sSlice; int64_t *index_data; real *tensor_data, *src_data; THArgCheck(index->nDimension <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(src->nDimension > 0, 2, "Source tensor is empty"); numel = THLongTensor_nElement(index); newSize = THLongStorage_newWithSize(src->nDimension); THLongStorage_rawCopy(newSize,src->size); #ifdef DEBUG THAssert(numel <= LONG_MAX); #endif newSize->data[dim] = numel; THTensor_(resize)(tensor,newSize,NULL); THLongStorage_free(newSize); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (dim == 0 && THTensor_(isContiguous)(src) && THTensor_(isContiguous)(tensor)) { tensor_data = THTensor_(data)(tensor); src_data = THTensor_(data)(src); ptrdiff_t rowsize = THTensor_(nElement)(src) / src->size[0]; // check that the indices are within range int64_t max = src->size[0] - 1 + TH_INDEX_BASE; for (i=0; i<numel; i++) { if (index_data[i] < TH_INDEX_BASE || index_data[i] > max) { 
THLongTensor_free(index); THError("index out of range"); } } if (src->nDimension == 1) { #pragma omp parallel for if(numel > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) tensor_data[i] = src_data[index_data[i] - TH_INDEX_BASE]; } else { #pragma omp parallel for if(numel*rowsize > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<numel; i++) memcpy(tensor_data + i*rowsize, src_data + (index_data[i] - TH_INDEX_BASE)*rowsize, rowsize*sizeof(real)); } } else if (src->nDimension == 1) { for (i=0; i<numel; i++) THTensor_(set1d)(tensor,i,THTensor_(get1d)(src,index_data[i] - TH_INDEX_BASE)); } else { for (i=0; i<numel; i++) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor, dim, i); THTensor_(select)(sSlice, src, dim, index_data[i] - TH_INDEX_BASE); THTensor_(copy)(tSlice, sSlice); THTensor_(free)(tSlice); THTensor_(free)(sSlice); } } THLongTensor_free(index); } void THTensor_(indexCopy)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4, "Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1 ) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE); THTensor_(select)(sSlice, src, dim, i); THTensor_(copy)(tSlice, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i)); } } THLongTensor_free(index); } static ptrdiff_t THTensor_(dataOffset)(THTensor* tensor, ptrdiff_t 
linearIndex)
{
  /* Translate a flat row-major linear index into the storage offset
   * implied by this tensor's size/stride, iterating dims innermost-first.
   * (Signature head is on the preceding source line.) */
  int64_t *size = tensor->size;
  int64_t *stride = tensor->stride;
  int nDim = tensor->nDimension;
  ptrdiff_t dataOffset = 0;
  for (int i = nDim - 1; i >= 0; i--) {
    dataOffset += (linearIndex % size[i]) * stride[i];
    linearIndex /= size[i];
  }
  return dataOffset;
}

/* Normalise a possibly negative linear index into [0, numel);
 * raises via THArgCheck when out of [-numel, numel). */
static int64_t THTensor_(wrapLinearIndex)(int64_t linearIndex, int64_t numel)
{
  THArgCheck(linearIndex < numel && linearIndex >= -numel, 2,
             "out of range: %d out of %d", (int)linearIndex, (int)numel);
  return linearIndex < 0 ? linearIndex + numel : linearIndex;
}

/* r_[i] = src[index[i]] over flat indices; negative indices wrap.
 * Non-contiguous src goes through dataOffset() per element. Result is
 * shaped like `index`; written via a contiguous scratch then copied back. */
void THTensor_(take)(THTensor *r_, THTensor *src, THLongTensor *index)
{
  THTensor_(resizeNd)(r_, index->nDimension, index->size, NULL);
  THTensor* dst = THTensor_(newContiguous)(r_);

  index = THLongTensor_newContiguous(index);
  int64_t* index_data = THLongTensor_data(index);
  ptrdiff_t srcElements = THTensor_(nElement)(src);
  real* src_data = THTensor_(data)(src);
  real* dst_data = THTensor_(data)(dst);

  ptrdiff_t nIndices = THLongTensor_nElement(index);
  if (THTensor_(isContiguous)(src)) {
    ptrdiff_t i;
    #pragma omp parallel for if(nIndices > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i = 0; i < nIndices; i++) {
      int64_t linearIndex = THTensor_(wrapLinearIndex)(index_data[i], srcElements);
      dst_data[i] = src_data[linearIndex];
    }
  } else {
    ptrdiff_t i;
    #pragma omp parallel for if(nIndices > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i = 0; i < nIndices; i++) {
      int64_t linearIndex = THTensor_(wrapLinearIndex)(index_data[i], srcElements);
      /* strided src: map linear index to actual storage offset */
      int64_t dataOffset = THTensor_(dataOffset)(src, linearIndex);
      dst_data[i] = src_data[dataOffset];
    }
  }

  THLongTensor_free(index);
  THTensor_(freeCopyTo)(dst, r_);
}

/* tensor[index[i]] = src[i] (flat indices), optionally accumulating.
 * (Body continues on the next source line.) */
void THTensor_(put)(THTensor *tensor, THLongTensor *index, THTensor *src, int accumulate)
{
  THArgCheck(THLongTensor_nElement(index) == THTensor_(nElement)(src), 3,
    "src should have the same number of elements as index");

  index = THLongTensor_newContiguous(index);
  src = THTensor_(newContiguous)(src);
  real* data = THTensor_(data)(tensor);
  ptrdiff_t numel =
THTensor_(nElement)(tensor); int is_contiguous = THTensor_(isContiguous)(tensor); TH_TENSOR_APPLY2(int64_t, index, real, src, int64_t linearIndex = THTensor_(wrapLinearIndex)(*index_data, numel); int64_t dataOffset = is_contiguous ? linearIndex : THTensor_(dataOffset)(tensor, linearIndex); if (accumulate) { data[dataOffset] += *src_data; } else { data[dataOffset] = *src_data; } ); THTensor_(free)(src); THLongTensor_free(index); } void THTensor_(indexAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { ptrdiff_t i, numel; THTensor *tSlice, *sSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < src->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); THArgCheck(numel == src->size[dim],4,"Number of indices should be equal to source:size(dim)"); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); if (tensor->nDimension > 1) { tSlice = THTensor_(new)(); sSlice = THTensor_(new)(); for (i=0; i<numel; i++) { THTensor_(select)(tSlice, tensor, dim, index_data[i] - TH_INDEX_BASE); THTensor_(select)(sSlice, src, dim, i); THTensor_(cadd)(tSlice, tSlice, 1.0, sSlice); } THTensor_(free)(tSlice); THTensor_(free)(sSlice); } else { for (i=0; i<numel; i++) { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, THTensor_(get1d)(src,i) + THTensor_(get1d)(tensor,index_data[i] - TH_INDEX_BASE)); } } THLongTensor_free(index); } void THTensor_(indexFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { ptrdiff_t i, numel; THTensor *tSlice; int64_t *index_data; numel = THLongTensor_nElement(index); THArgCheck(index->nDimension == 1, 3, "Index is supposed to be a vector"); THArgCheck(dim < tensor->nDimension, 4,"Indexing dim %d is out of bounds of tensor", dim + TH_INDEX_BASE); index = THLongTensor_newContiguous(index); index_data = THLongTensor_data(index); for (i=0; i<numel; i++) { if 
(tensor->nDimension > 1) { tSlice = THTensor_(new)(); THTensor_(select)(tSlice, tensor,dim,index_data[i] - TH_INDEX_BASE); THTensor_(fill)(tSlice, val); THTensor_(free)(tSlice); } else { THTensor_(set1d)(tensor, index_data[i] - TH_INDEX_BASE, val); } } THLongTensor_free(index); } void THTensor_(gather)(THTensor *tensor, THTensor *src, int dim, THLongTensor *index) { int64_t elems_per_row, i, idx; THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(src), 4, "Index tensor must have same dimensions as input tensor"); THArgCheck(dim >= 0 && dim < THTensor_(nDimension)(tensor), 3, "Index dimension is out of bounds"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 2, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= src_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in gather"); } *(tensor_data + i*tensor_stride) = src_data[(idx - TH_INDEX_BASE) * src_stride]; }) } void THTensor_(scatter)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); // Assumes TENSOR1 is real // TENSOR2 is src // TENSOR3 is index // Tests: // 1. index->size[d] <= src->size[d] for all d // 2. 
index->size[d] <= real->size[d] for all d != dim #define TH_TENSOR_DIM_APPLY3_SIZE_SCATTER(TENSOR1, TENSOR2, TENSOR3, DIMENSION) \ { \ int shape_check_flag = 0; \ for(TH_TENSOR_DIM_APPLY_i = 0; TH_TENSOR_DIM_APPLY_i < TENSOR1->nDimension; TH_TENSOR_DIM_APPLY_i++) \ { \ int64_t TENSOR3##_dim_size = TENSOR3->size[TH_TENSOR_DIM_APPLY_i]; \ if (TH_TENSOR_DIM_APPLY_i != DIMENSION) { \ if (TENSOR3##_dim_size > TENSOR1->size[TH_TENSOR_DIM_APPLY_i]) { \ shape_check_flag = 1; \ break; \ } \ } \ if (TENSOR3##_dim_size > TENSOR2->size[TH_TENSOR_DIM_APPLY_i]) { \ shape_check_flag = 1; \ break; \ } \ } \ if (shape_check_flag == 1) { \ THDescBuff T1buff = _THSizeDesc(TENSOR1->size, TENSOR1->nDimension); \ THDescBuff T2buff = _THSizeDesc(TENSOR2->size, TENSOR2->nDimension); \ THDescBuff T3buff = _THSizeDesc(TENSOR3->size, TENSOR3->nDimension); \ THError("Expected %s %s to be smaller size than %s %s and to be smaller than %s %s apart from dimension %d", \ #TENSOR3, T3buff.str, #TENSOR2, T2buff.str, #TENSOR1, T1buff.str, DIMENSION); \ } \ } TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_SCATTER, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = *(src_data + i*src_stride); }) } void THTensor_(scatterAdd)(THTensor *tensor, int dim, THLongTensor *index, THTensor *src) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); THArgCheck(THTensor_(nDimension)(src) == THTensor_(nDimension)(tensor), 4, "Input tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); 
TH_TENSOR_DIM_APPLY3(real, tensor, real, src, int64_t, index, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatterAdd"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] += *(src_data + i*src_stride); }) } void THTensor_(scatterFill)(THTensor *tensor, int dim, THLongTensor *index, real val) { int64_t elems_per_row, i, idx; THArgCheck(dim < THTensor_(nDimension)(tensor), 2, "Index dimension is out of bounds"); THArgCheck(THLongTensor_nDimension(index) == THTensor_(nDimension)(tensor), 3, "Index tensor must have same dimensions as output tensor"); elems_per_row = THLongTensor_size(index, dim); TH_TENSOR_DIM_APPLY2(real, tensor, int64_t, index, dim, for (i = 0; i < elems_per_row; ++i) { idx = *(index_data + i*index_stride); if (idx < TH_INDEX_BASE || idx >= tensor_size + TH_INDEX_BASE) { THFree(TH_TENSOR_DIM_APPLY_counter); THError("Invalid index in scatter"); } tensor_data[(idx - TH_INDEX_BASE) * tensor_stride] = val; }) } accreal THTensor_(dot)(THTensor *tensor, THTensor *src) { accreal sum = 0; /* we use a trick here. careful with that. */ TH_TENSOR_APPLY2(real, tensor, real, src, int64_t sz = (tensor_size-tensor_i < src_size-src_i ? 
tensor_size-tensor_i : src_size-src_i); sum += THBlas_(dot)(sz, src_data, src_stride, tensor_data, tensor_stride); tensor_i += sz; src_i += sz; tensor_data += sz*tensor_stride; src_data += sz*src_stride; break;); return sum; } #undef th_isnan #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #define th_isnan(val) \ (isnan(val)) #else #define th_isnan(val) (0) #endif #undef th_isnan_break #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #define th_isnan_break(val) \ if (isnan(val)) break; #else #define th_isnan_break(val) #endif real THTensor_(minall)(THTensor *tensor) { real theMin; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMin = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value<theMin in the case of NaNs */ if(!(value >= theMin)) { theMin = value; th_isnan_break(value) }); return theMin; } real THTensor_(maxall)(THTensor *tensor) { real theMax; real value; THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); theMax = THTensor_(data)(tensor)[0]; TH_TENSOR_APPLY(real, tensor, value = *tensor_data; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theMax = value; th_isnan_break(value) }); return theMax; } static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride); real THTensor_(medianall)(THTensor *tensor) { THArgCheck(tensor->nDimension > 0, 1, "tensor must have one dimension"); real theMedian; ptrdiff_t numel; int64_t k; THTensor *temp_; real *temp__data; numel = THTensor_(nElement)(tensor); k = (numel-1) >> 1; temp_ = THTensor_(newClone)(tensor); temp__data = THTensor_(data)(temp_); THTensor_(quickselectnoidx)(temp__data, k, numel, 1); theMedian = temp__data[k]; THTensor_(free)(temp_); return theMedian; } accreal THTensor_(sumall)(THTensor *tensor) { accreal sum = 0; TH_TENSOR_APPLY(real, tensor, sum += *tensor_data;); return sum; } accreal 
THTensor_(prodall)(THTensor *tensor)
{
  /* Product reduction over all elements, accumulated in accreal.
   * (Return type `accreal` is on the preceding source line.) */
  accreal prod = 1;
  TH_TENSOR_APPLY(real, tensor, prod *= *tensor_data;);
  return prod;
}

/* r_ = t + value (element-wise). Vectorised fast path when both tensors
 * are contiguous with matching element counts; strided traversal otherwise. */
void THTensor_(add)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(adds)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data + value;);
  }
}

/* r_ = t - value, as addition of the negation. */
void THTensor_(sub)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(add)(r_, t, -value);
}

/* r_ = t + value*alpha. */
void THTensor_(add_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  THTensor_(add)(r_, t, value * alpha);
}

/* r_ = t - value*alpha. */
void THTensor_(sub_scaled)(THTensor *r_, THTensor *t, real value, real alpha)
{
  THTensor_(add)(r_, t, -value * alpha);
}

/* r_ = t * value (element-wise). */
void THTensor_(mul)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(muls)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * value;);
  }
}

/* r_ = t / value (element-wise). No divide-by-zero guard: for float/double
 * that follows IEEE semantics; for integer types value==0 is UB. */
void THTensor_(div)(THTensor *r_, THTensor *t, real value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(divs)(r__data, t_data, value, r__len););
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data / value;);
  }
}

/* r_ = t << value. For float/double this is multiplication by 2^value;
 * unsupported for half; true bit shift for integer types.
 * (Body continues on the next source line.) */
void THTensor_(lshift)(THTensor *r_, THTensor *t, real value)
{
#if defined(TH_REAL_IS_FLOAT)
  return THTensor_(mul)(r_, t, powf(2, value));
#elif defined(TH_REAL_IS_DOUBLE)
  return THTensor_(mul)(r_, t, pow(2, value));
#elif defined(TH_REAL_IS_HALF)
  return THError("lshift is not supported for torch.HalfTensor");
#else
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) &&
THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t sz = THTensor_(nElement)(t); int64_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) << value; #else rp[i] = ((ureal) tp[i]) << value; #endif } } else { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) << value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) << value);); #endif } #endif } void THTensor_(rshift)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) return THTensor_(div)(r_, t, powf(2, value)); #elif defined(TH_REAL_IS_DOUBLE) return THTensor_(div)(r_, t, pow(2, value)); #elif defined(TH_REAL_IS_HALF) return THError("rshift is not supported for torch.HalfTensor"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t sz = THTensor_(nElement)(t); int64_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) >> value; #else rp[i] = ((ureal) tp[i]) >> value; #endif } } else { #if defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((real) *t_data) >> value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (((ureal) *t_data) >> value);); #endif } #endif } void THTensor_(fmod)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > 
TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = fmod(tp[i], value); #else rp[i] = tp[i] % value; #endif } } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = fmod(*t_data, value);); #else TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (*t_data % value);); #endif } } void THTensor_(remainder)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = (value == 0)? NAN : tp[i] - value * floor(tp[i] / value); #else // There is no NAN for integers rp[i] = tp[i] % value; if (rp[i] * value < 0) rp[i] += value; #endif } } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY2(real, r_, real, t, *r__data = (value == 0)? 
NAN : *t_data - value * floor(*t_data / value);); #else // There is no NAN for integers TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data % value; if (*r__data * value < 0) *r__data += value;); #endif } } void THTensor_(bitand)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("bitand is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t sz = THTensor_(nElement)(t); int64_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<sz; i++) { rp[i] = tp[i] & value; } } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data & value;); } #endif } void THTensor_(bitor)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("bitor is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); int64_t sz = THTensor_(nElement)(t); int64_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i) for (i=0; i<sz; i++) { rp[i] = tp[i] | value; } } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data | value;); } #endif } void THTensor_(bitxor)(THTensor *r_, THTensor *t, real value) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("bitxor is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = 
THTensor_(data)(t);
    /* (tail of THTensor_(bitxor): contiguous XOR-with-scalar fast path) */
    real *rp = THTensor_(data)(r_);
    int64_t sz = THTensor_(nElement)(t);
    int64_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD * 100) private(i)
    for (i=0; i<sz; i++) {
      rp[i] = tp[i] ^ value;
    }
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data ^ value;);
  }
#endif
}

/* r_ = clamp(t, min_value, max_value) element-wise. */
void THTensor_(clamp)(THTensor *r_, THTensor *t, real min_value, real max_value)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) {
    real *tp = THTensor_(data)(t);
    real *rp = THTensor_(data)(r_);
    /* real t_val; */
    ptrdiff_t sz = THTensor_(nElement)(t);
    ptrdiff_t i;
    #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i)
    for (i=0; i<sz; i++)
      rp[i] = (tp[i] < min_value) ? min_value : (tp[i] > max_value ? max_value : tp[i]);
  } else {
    TH_TENSOR_APPLY2(real, r_, real, t,
      *r__data = (*t_data < min_value) ? min_value : (*t_data > max_value ? max_value : *t_data););
  }
}

/* r_ = t + value * src. In-place accumulation (r_ == t) goes through BLAS
 * axpy; otherwise the vector kernel when everything is contiguous, else
 * the generic strided traversal. */
void THTensor_(cadd)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    if(r_ == t) {
      THBlas_(axpy)(THTensor_(nElement)(t), value, THTensor_(data)(src), 1, THTensor_(data)(r_), 1);
    } else {
      TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cadd)(r__data, t_data, src_data, value, r__len););
    }
  } else {
    TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data + value * *src_data;);
  }
}

/* r_ = t - value * src, via cadd with negated scale. */
void THTensor_(csub)(THTensor *r_, THTensor *t, real value, THTensor *src)
{
  THTensor_(cadd)(r_, t, -value, src);
}

/* r_ = t * src element-wise. (Body continues on the next source line.) */
void THTensor_(cmul)(THTensor *r_, THTensor *t, THTensor *src)
{
  THTensor_(resizeAs)(r_, t);
  if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) {
    TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src,
THVector_(cmul)(r__data, t_data, src_data, r__len);); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * *src_data;); } } void THTensor_(cpow)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(tp[i], sp[i]); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = pow(*t_data, *src_data);); } } void THTensor_(cdiv)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { TH_TENSOR_APPLY3_CONTIG(real, r_, real, t, real, src, THVector_(cdiv)(r__data, t_data, src_data, r__len);); } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / *src_data;); } } void THTensor_(clshift)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_HALF) return THError("clshift is not supported for torch.HalfTensor"); #endif THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) rp[i] = tp[i] * powf(2, sp[i]); #elif defined(TH_REAL_IS_DOUBLE) rp[i] = tp[i] * pow(2, sp[i]); #elif defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) << sp[i]; #else rp[i] = 
((ureal) tp[i]) << sp[i]; #endif } } else { #if defined(TH_REAL_IS_FLOAT) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * powf(2, *src_data);); #elif defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data * pow(2, *src_data);); #elif defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) << *src_data;); #else TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) << *src_data;); #endif } } void THTensor_(crshift)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_HALF) return THError("crshift is not supported for torch.HalfTensor"); #endif THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) rp[i] = tp[i] / powf(2, sp[i]); #elif defined(TH_REAL_IS_DOUBLE) rp[i] = tp[i] / pow(2, sp[i]); #elif defined(TH_REAL_IS_BYTE) rp[i] = ((real) tp[i]) >> sp[i]; #else rp[i] = ((ureal) tp[i]) >> sp[i]; #endif } } else { #if defined(TH_REAL_IS_FLOAT) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / powf(2, *src_data);); #elif defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data / pow(2, *src_data);); #elif defined(TH_REAL_IS_BYTE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((real)*t_data) >> *src_data;); #else TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = ((ureal)*t_data) >> *src_data;); #endif } } void THTensor_(cfmod)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && 
THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = fmod(tp[i], sp[i]); #else rp[i] = tp[i] % sp[i]; #endif } } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = fmod(*t_data, *src_data);); #else TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*t_data % *src_data);); #endif } } void THTensor_(cremainder)(THTensor *r_, THTensor *t, THTensor *src) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) rp[i] = (sp[i] == 0)? NAN : tp[i] - sp[i] * floor(tp[i] / sp[i]); #else // There is no NAN for integers rp[i] = tp[i] % sp[i]; if (rp[i] * sp[i] < 0) rp[i] += sp[i]; #endif } } else { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = (*src_data == 0)? 
NAN : *t_data - *src_data * floor(*t_data / *src_data);); #else // There is no NAN for integers TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data % *src_data; if (*r__data * *src_data < 0) *r__data += *src_data;); #endif } } void THTensor_(cbitand)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("cbitand is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { rp[i] = tp[i] & sp[i]; } } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data & *src_data;); } #endif } void THTensor_(cbitor)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("cbitor is only supported for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { rp[i] = tp[i] | sp[i]; } } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data | *src_data;); } #endif } void THTensor_(cbitxor)(THTensor *r_, THTensor *t, THTensor *src) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) || defined(TH_REAL_IS_HALF) return THError("cbitxor is only supported 
for integer type tensors"); #else THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(isContiguous)(src) && THTensor_(nElement)(r_) == THTensor_(nElement)(src)) { real *tp = THTensor_(data)(t); real *sp = THTensor_(data)(src); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) { rp[i] = tp[i] ^ sp[i]; } } else { TH_TENSOR_APPLY3(real, r_, real, t, real, src, *r__data = *t_data ^ *src_data;); } #endif } void THTensor_(tpow)(THTensor *r_, real value, THTensor *t) { THTensor_(resizeAs)(r_, t); if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t) && THTensor_(nElement)(r_) == THTensor_(nElement)(t)) { real *tp = THTensor_(data)(t); real *rp = THTensor_(data)(r_); ptrdiff_t sz = THTensor_(nElement)(t); ptrdiff_t i; #pragma omp parallel for if(sz > TH_OMP_OVERHEAD_THRESHOLD) private(i) for (i=0; i<sz; i++) rp[i] = pow(value, tp[i]); } else { TH_TENSOR_APPLY2(real, r_, real, t, *r__data = pow(value, *t_data);); } } void THTensor_(addcmul)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data * *src2_data;); } void THTensor_(addcdiv)(THTensor *r_, THTensor *t, real value, THTensor *src1, THTensor *src2) { if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } TH_TENSOR_APPLY3(real, r_, real, src1, real, src2, *r__data += value * *src1_data / *src2_data;); } void THTensor_(addmv)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *mat, THTensor *vec) { if( (mat->nDimension != 2) || (vec->nDimension != 1) ) THError("matrix and vector expected, got %dD, %dD", mat->nDimension, vec->nDimension); if( mat->size[1] != vec->size[0] ) { THDescBuff bm = THTensor_(sizeDesc)(mat); THDescBuff bv = 
THTensor_(sizeDesc)(vec);
    THError("size mismatch, %s, %s", bm.str, bv.str);
  }

  /* t supplies the beta-scaled additive term; it must be a vector of mat->size[0]. */
  if(t->nDimension != 1)
    THError("vector expected, got t: %dD", t->nDimension);

  if(t->size[0] != mat->size[0]) {
    THDescBuff bt = THTensor_(sizeDesc)(t);
    THDescBuff bm = THTensor_(sizeDesc)(mat);
    THError("size mismatch, t: %s, mat: %s", bt.str, bm.str);
  }

  /* Accumulate into r_; when r_ aliases t the copy is skipped. */
  if(r_ != t) {
    THTensor_(resizeAs)(r_, t);
    THTensor_(copy)(r_, t);
  }

  /* Dispatch on mat's memory layout so BLAS gemv sees a contiguous leading
     dimension: column-major ('n'), row-major (transposed 't'), or, failing
     both, a temporary contiguous clone. */
  if(mat->stride[0] == 1) {
    THBlas_(gemv)('n', mat->size[0], mat->size[1],
                  alpha, THTensor_(data)(mat), mat->stride[1],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else if(mat->stride[1] == 1) {
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(mat), mat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
  }
  else {
    THTensor *cmat = THTensor_(newContiguous)(mat);
    THBlas_(gemv)('t', mat->size[1], mat->size[0],
                  alpha, THTensor_(data)(cmat), cmat->stride[0],
                  THTensor_(data)(vec), vec->stride[0],
                  beta, THTensor_(data)(r_), r_->stride[0]);
    THTensor_(free)(cmat);
  }
}

/* match: r_[i][j] = gain * squared Euclidean distance between row i of m1 and
   row j of m2, after flattening each tensor's trailing dimensions into one
   "inner vector" dimension. Parallelized over rows of m1.
   NOTE(review): m1/m2 are replaced by contiguous copies and resized in place
   here; callers only observe r_. */
void THTensor_(match)(THTensor *r_, THTensor *m1, THTensor *m2, real gain)
{
  int64_t N1 = m1->size[0];
  int64_t N2 = m2->size[0];
  int64_t dim;
  real *m1_p;
  real *m2_p;
  real *r_p;
  int64_t i;

  THTensor_(resize2d)(r_, N1, N2);

  /* Work on contiguous copies so flat pointer arithmetic below is valid. */
  m1 = THTensor_(newContiguous)(m1);
  m2 = THTensor_(newContiguous)(m2);

  /* Collapse all trailing dims into a single inner-vector dimension. */
  THTensor_(resize2d)(m1, N1, THTensor_(nElement)(m1) / N1);
  THTensor_(resize2d)(m2, N2, THTensor_(nElement)(m2) / N2);

  dim = m1->size[1];
  THArgCheck(m1->size[1] == m2->size[1], 3, "m1 and m2 must have the same inner vector dim");

  m1_p = THTensor_(data)(m1);
  m2_p = THTensor_(data)(m2);
  r_p = THTensor_(data)(r_);

#pragma omp parallel for private(i)
  for (i=0; i<N1; i++) {
    int64_t j,k;
    for (j=0; j<N2; j++) {
      real sum = 0;
      for (k=0; k<dim; k++) {
        real term = m1_p[ i*dim + k ] - m2_p[ j*dim + k ];
        sum += term*term;
      }
      r_p[ i*N2 + j ] = gain * sum;
    }
  }

  /* Release the contiguous working copies (the originals are untouched). */
  THTensor_(free)(m1);
  THTensor_(free)(m2);
}

/* addmm: r_ = beta*t + alpha*(m1 @ m2), dispatched to BLAS gemm. */
void THTensor_(addmm)(THTensor *r_, real beta,
THTensor *t, real alpha, THTensor *m1, THTensor *m2) { char transpose_r, transpose_m1, transpose_m2; THTensor *r__, *m1_, *m2_; if( (m1->nDimension != 2) || (m2->nDimension != 2)) THError("matrices expected, got %dD, %dD tensors", m1->nDimension, m2->nDimension); if(m1->size[1] != m2->size[0]) { THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( t->nDimension != 2 ) THError("matrix expected, got %dD tensor for t", t->nDimension); if( (t->size[0] != m1->size[0]) || (t->size[1] != m2->size[1]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bm1 = THTensor_(sizeDesc)(m1); THDescBuff bm2 = THTensor_(sizeDesc)(m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THTensor_(resizeAs)(r_, t); if (beta != 0.0) { THTensor_(copy)(r_, t); } } /* r_ */ if(r_->stride[0] == 1 && r_->stride[1] != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride[1] == 1 && r_->stride[0] != 0) { THTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THTensor *transp_r_ = THTensor_(newTranspose)(r_, 0, 1); r__ = THTensor_(newClone)(transp_r_); THTensor_(free)(transp_r_); THTensor_(transpose)(r__, NULL, 0, 1); } /* m1 */ if(m1->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m1->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m1->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THTensor_(newContiguous)(m1); } /* m2 */ if(m2->stride[(transpose_r == 'n' ? 0 : 1)] == 1 && m2->stride[(transpose_r == 'n' ? 1 : 0)] != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride[(transpose_r == 'n' ? 1 : 0)] == 1 && m2->stride[(transpose_r == 'n' ? 0 : 1)] != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 
't' : 'n'); m2_ = THTensor_(newContiguous)(m2); } #pragma omp critical(blasgemm) /* do the operation */ THBlas_(gemm)(transpose_m1, transpose_m2, r__->size[(transpose_r == 'n' ? 0 : 1)], r__->size[(transpose_r == 'n' ? 1 : 0)], m1_->size[(transpose_r == 'n' ? 1 : 0)], alpha, THTensor_(data)(m1_), (transpose_m1 == 'n' ? m1_->stride[(transpose_r == 'n' ? 1 : 0)] : m1_->stride[(transpose_r == 'n' ? 0 : 1)]), THTensor_(data)(m2_), (transpose_m2 == 'n' ? m2_->stride[(transpose_r == 'n' ? 1 : 0)] : m2_->stride[(transpose_r == 'n' ? 0 : 1)]), beta, THTensor_(data)(r__), r__->stride[(transpose_r == 'n' ? 1 : 0)]); /* free intermediate variables */ if(m1_ != m1) THTensor_(free)(m1_); if(m2_ != m2) THTensor_(free)(m2_); if(r__ != r_) THTensor_(freeCopyTo)(r__, r_); } void THTensor_(addr)(THTensor *r_, real beta, THTensor *t, real alpha, THTensor *vec1, THTensor *vec2) { if( (vec1->nDimension != 1) || (vec2->nDimension != 1) ) THError("vector and vector expected, got %dD, %dD tensors", vec1->nDimension, vec2->nDimension); if(t->nDimension != 2) THError("expected matrix, got %dD tensor for t", t->nDimension); if( (t->size[0] != vec1->size[0]) || (t->size[1] != vec2->size[0]) ) { THDescBuff bt = THTensor_(sizeDesc)(t); THDescBuff bv1 = THTensor_(sizeDesc)(vec1); THDescBuff bv2 = THTensor_(sizeDesc)(vec2); THError("size mismatch, t: %s, vec1: %s, vec2: %s", bt.str, bv1.str, bv2.str); } if(r_ != t) { THTensor_(resizeAs)(r_, t); THTensor_(copy)(r_, t); } if(beta == 0) { THTensor_(zero)(r_); } else if(beta != 1) THTensor_(mul)(r_, r_, beta); if(r_->stride[0] == 1) { THBlas_(ger)(vec1->size[0], vec2->size[0], alpha, THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(r_), r_->stride[1]); } else if(r_->stride[1] == 1) { THBlas_(ger)(vec2->size[0], vec1->size[0], alpha, THTensor_(data)(vec2), vec2->stride[0], THTensor_(data)(vec1), vec1->stride[0], THTensor_(data)(r_), r_->stride[0]); } else { THTensor *cr = THTensor_(newClone)(r_); 
THBlas_(ger)(vec2->size[0], vec1->size[0],
                 alpha, THTensor_(data)(vec2), vec2->stride[0],
                 THTensor_(data)(vec1), vec1->stride[0],
                 THTensor_(data)(cr), cr->stride[0]);

    /* cr is a contiguous clone used because r_ had no unit stride;
       copy the result back into r_ and free the clone. */
    THTensor_(freeCopyTo)(cr, r_);
  }
}

/* addbmm: result = beta*t + alpha * sum_b (batch1[b] @ batch2[b]).
   All per-batch matrix products are accumulated into one 2D output. */
void THTensor_(addbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor");
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor");
  THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2,
             "equal number of batches expected, got %d, %d",
             THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0));
  THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2,
             "wrong matrix size, batch1: %dx%d, batch2: %dx%d",
             THTensor_(size)(batch1, 1), THTensor_(size)(batch1,2),
             THTensor_(size)(batch2, 1), THTensor_(size)(batch2,2));

  int64_t dim1 = THTensor_(size)(batch1, 1);
  int64_t dim2 = THTensor_(size)(batch2, 2);
  THArgCheck(THTensor_(size)(t, 0) == dim1, 1, "output tensor of incorrect size");
  THArgCheck(THTensor_(size)(t, 1) == dim2, 1, "output tensor of incorrect size");

  if (t != result) {
    THTensor_(resizeAs)(result, t);
    /* When beta == 0 the t term is irrelevant, so skip the copy. */
    if (beta != 0.0) {
      THTensor_(copy)(result, t);
    }
  }

  /* Reusable views selecting one matrix per batch; no data is copied. */
  THTensor *matrix1 = THTensor_(new)();
  THTensor *matrix2 = THTensor_(new)();

  for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) {
    THTensor_(select)(matrix1, batch1, 0, batch);
    THTensor_(select)(matrix2, batch2, 0, batch);
    THTensor_(addmm)(result, beta, result, alpha, matrix1, matrix2);
    /* After the first product, beta must be 1 so subsequent batches
       accumulate instead of rescaling the running sum. */
    beta = 1; // accumulate output once
  }

  THTensor_(free)(matrix1);
  THTensor_(free)(matrix2);
}

/* baddbmm: result[b] = beta*t[b] + alpha*(batch1[b] @ batch2[b]) per batch. */
void THTensor_(baddbmm)(THTensor *result, real beta, THTensor *t, real alpha, THTensor *batch1, THTensor *batch2)
{
  int64_t batch;

  THArgCheck(THTensor_(nDimension)(batch1) == 3, 1, "expected 3D tensor, got %dD",
             THTensor_(nDimension)(batch1));
  THArgCheck(THTensor_(nDimension)(batch2) == 3, 2, "expected 3D tensor, got %dD",
             THTensor_(nDimension)(batch2));
THArgCheck(THTensor_(size)(batch1, 0) == THTensor_(size)(batch2, 0), 2, "equal number of batches expected, got %d, %d", THTensor_(size)(batch1, 0), THTensor_(size)(batch2, 0)); THArgCheck(THTensor_(size)(batch1, 2) == THTensor_(size)(batch2, 1), 2, "wrong matrix size, batch1: %dx%d, batch2: %dx%d", THTensor_(size)(batch1, 1), THTensor_(size)(batch1, 2), THTensor_(size)(batch2, 1), THTensor_(size)(batch2, 2)); int64_t bs = THTensor_(size)(batch1, 0); int64_t dim1 = THTensor_(size)(batch1, 1); int64_t dim2 = THTensor_(size)(batch2, 2); THArgCheck(THTensor_(size)(t, 0) == bs, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 1) == dim1, 1, "output tensor of incorrect size"); THArgCheck(THTensor_(size)(t, 2) == dim2, 1, "output tensor of incorrect size"); if (t != result) { THTensor_(resizeAs)(result, t); if (beta != 0.0) { THTensor_(copy)(result, t); } } THTensor *matrix1 = THTensor_(new)(); THTensor *matrix2 = THTensor_(new)(); THTensor *result_matrix = THTensor_(new)(); for (batch = 0; batch < THTensor_(size)(batch1, 0); ++batch) { THTensor_(select)(matrix1, batch1, 0, batch); THTensor_(select)(matrix2, batch2, 0, batch); THTensor_(select)(result_matrix, result, 0, batch); THTensor_(addmm)(result_matrix, beta, result_matrix, alpha, matrix1, matrix2); } THTensor_(free)(matrix1); THTensor_(free)(matrix2); THTensor_(free)(result_matrix); } ptrdiff_t THTensor_(numel)(THTensor *t) { return THTensor_(nElement)(t); } void THTensor_(max)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { real theMax; real value; int64_t 
theIndex; int64_t i; TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value <= theMax)) { theIndex = i; theMax = value; th_isnan_break(value) } } *indices__data = theIndex; *values__data = theMax;); } else { if (THTensor_(nDimension)(t) > 1) { THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); THTensor_(copy)(values_, t0); THTensor_(free)(t0); } else { THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); } THLongTensor_zero(indices_); if(t->size[dimension] == 1) { if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } return; } THTensor *tempValues_ = THTensor_(newWithTensor)(values_); // tempValues_.expand_as(t) tempValues_->size[dimension] = t->size[dimension]; tempValues_->stride[dimension] = 0; THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); // tempIndices_.expand_as(t) tempIndices_->size[dimension] = t->size[dimension]; tempIndices_->stride[dimension] = 0; TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data <= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; }); THTensor_(free)(tempValues_); THLongTensor_free(tempIndices_); } if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(min)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); 
THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); // two implementations optimized for data locality if (t->stride[dimension] == 1) { real theMax; real value; int64_t theIndex; int64_t i; TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, theMax = t_data[0]; theIndex = 0; for(i = 0; i < t_size; i++) { value = t_data[i*t_stride]; /* This is not the same as value>theMax in the case of NaNs */ if(!(value >= theMax)) { theIndex = i; theMax = value; th_isnan_break(value) } } *indices__data = theIndex; *values__data = theMax;); } else { if (THTensor_(nDimension)(t) > 1) { THTensor *t0 = THTensor_(newSelect)(t, dimension, 0); THTensor_(copy)(values_, t0); THTensor_(free)(t0); } else { THTensor_(fill)(values_, THTensor_(get1d)(t, 0)); } THLongTensor_zero(indices_); if(t->size[dimension] == 1) { if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } return; } THTensor *tempValues_ = THTensor_(newWithTensor)(values_); // tempValues_.expand_as(t) tempValues_->size[dimension] = t->size[dimension]; tempValues_->stride[dimension] = 0; THLongTensor *tempIndices_ = THLongTensor_newWithTensor(indices_); // tempIndices_.expand_as(t) tempIndices_->size[dimension] = t->size[dimension]; tempIndices_->stride[dimension] = 0; TH_TENSOR_APPLY3_D(real, t, real, tempValues_, int64_t, tempIndices_, dimension, if(!(*t_data >= *tempValues__data) && !th_isnan(*tempValues__data)) { *tempValues__data = *t_data; *tempIndices__data = *tempIndices__dimOffset; }); THTensor_(free)(tempValues_); THLongTensor_free(tempIndices_); } if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(sum)(THTensor *r_, THTensor *t, int dimension, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension 
+ TH_INDEX_BASE);

  /* Output keeps t's shape with the reduced dimension set to 1
     (squeezed at the end when !keepdim). */
  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    /* Reduced dimension is contiguous: sum each slice with a tight loop. */
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal sum = 0;
                         int64_t i;
                         for(i = 0; i < t_size; i++)
                           sum += t_data[i*t_stride];
                         *r__data = (real)sum;);
  } else {
    /* Strided case: zero the output, then stride-0 expand it along the
       reduced dimension and accumulate element-wise in t's memory order. */
    THTensor_(zero)(r_);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;
    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data + *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}

/* prod: reduce t by multiplication along `dimension` into r_.
   Mirrors sum() above: identity element 1 and `*=` instead of 0 and `+=`. */
void THTensor_(prod)(THTensor *r_, THTensor *t, int dimension, int keepdim)
{
  THLongStorage *dim;

  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2,
             "dimension %d out of range", dimension + TH_INDEX_BASE);

  dim = THTensor_(newSizeOf)(t);
  THLongStorage_set(dim, dimension, 1);
  THTensor_(resize)(r_, dim, NULL);
  THLongStorage_free(dim);

  // two implementations optimized for data locality
  if (t->stride[dimension] == 1) {
    TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                         accreal prod = 1;
                         int64_t i;
                         for(i = 0; i < t_size; i++)
                           prod *= t_data[i*t_stride];
                         *r__data = (real)prod;);
  } else {
    THTensor_(fill)(r_, 1);
    THTensor *temp_ = THTensor_(newWithTensor)(r_);
    // r_.expand_as(t)
    temp_->size[dimension] = t->size[dimension];
    temp_->stride[dimension] = 0;
    TH_TENSOR_APPLY2(real, temp_, real, t, *temp__data = *temp__data * *t_data;);
    THTensor_(free)(temp_);
  }

  if (!keepdim) {
    THTensor_(squeeze1d)(r_, r_, dimension);
  }
}

/* cumsum: running sum of t along `dimension`; r_ has t's full shape.
   Accumulates in accreal to reduce rounding/overflow before the final cast. */
void THTensor_(cumsum)(THTensor *r_, THTensor *t, int dimension)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2,
             "dimension %d out of range", dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, t);

  TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension,
                       accreal cumsum = 0;
                       int64_t i;
                       for(i = 0; i < t_size; i++) {
cumsum += t_data[i*t_stride]; r__data[i*r__stride] = (real)cumsum; }); } void THTensor_(cumprod)(THTensor *r_, THTensor *t, int dimension) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "dimension %d out of range", dimension + TH_INDEX_BASE); THTensor_(resizeAs)(r_, t); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal cumprod = 1; int64_t i; for(i = 0; i < t_size; i++) { cumprod *= t_data[i*t_stride]; r__data[i*r__stride] = (real)cumprod; }); } void THTensor_(sign)(THTensor *r_, THTensor *t) { THTensor_(resizeAs)(r_, t); #if defined (TH_REAL_IS_BYTE) TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else *r__data = 0;); #else TH_TENSOR_APPLY2(real, r_, real, t, if (*t_data > 0) *r__data = 1; else if (*t_data < 0) *r__data = -1; else *r__data = 0;); #endif } accreal THTensor_(trace)(THTensor *t) { real *t_data = THTensor_(data)(t); accreal sum = 0; int64_t i = 0; int64_t t_stride_0, t_stride_1, t_diag_size; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); t_diag_size = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)); while(i < t_diag_size) { sum += t_data[i*(t_stride_0+t_stride_1)]; i++; } return sum; } void THTensor_(cross)(THTensor *r_, THTensor *a, THTensor *b, int dimension) { int i; if(THTensor_(nDimension)(a) != THTensor_(nDimension)(b)) THError("inconsistent tensor dimension %dD, %dD", THTensor_(nDimension)(a), THTensor_(nDimension)(b)); for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) != THTensor_(size)(b, i)) { THDescBuff ba = THTensor_(sizeDesc)(a); THDescBuff bb = THTensor_(sizeDesc)(b); THError("inconsistent tensor sizes %s, %s", ba.str, bb.str); } } if(dimension < 0) { for(i = 0; i < THTensor_(nDimension)(a); i++) { if(THTensor_(size)(a, i) == 3) { dimension = i; break; } } if(dimension < 0) { THDescBuff ba = THTensor_(sizeDesc)(a); THError("no dimension of size 3 in a: %s", 
ba.str);
    }
  }

  /* The chosen (or supplied) dimension must exist and have extent 3,
     since a 3D cross product is computed along it. */
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(a), 3,
             "dimension %d out of range", dimension + TH_INDEX_BASE);
  THArgCheck(THTensor_(size)(a, dimension) == 3, 3,
             "dimension %d does not have size 3", dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(r_, a);

  /* Classic component formula: r = a x b for every 3-vector slice
     along `dimension`. */
  TH_TENSOR_DIM_APPLY3(real, a, real, b, real, r_, dimension,
                       TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
                       r__data[0*r__stride] = a_data[1*a_stride]*b_data[2*b_stride] - a_data[2*a_stride]*b_data[1*b_stride];
                       r__data[1*r__stride] = a_data[2*a_stride]*b_data[0*b_stride] - a_data[0*a_stride]*b_data[2*b_stride];
                       r__data[2*r__stride] = a_data[0*a_stride]*b_data[1*b_stride] - a_data[1*a_stride]*b_data[0*b_stride];);
}

/* cmax: element-wise maximum of t and src into r. */
void THTensor_(cmax)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data > *src_data ? *t_data : *src_data;);
}

/* cmin: element-wise minimum of t and src into r. */
void THTensor_(cmin)(THTensor *r, THTensor *t, THTensor *src) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY3(real, r, real, t, real, src,
                   *r_data = *t_data < *src_data ? *t_data : *src_data;);
}

/* cmaxValue: clamp each element of t from below by `value` into r. */
void THTensor_(cmaxValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data > value ? *t_data : value;);
}

/* cminValue: clamp each element of t from above by `value` into r. */
void THTensor_(cminValue)(THTensor *r, THTensor *t, real value) {
  THTensor_(resizeAs)(r, t);
  TH_TENSOR_APPLY2(real, r, real, t,
                   *r_data = *t_data < value ?
*t_data : value;); } void THTensor_(zeros)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(zero)(r_); } void THTensor_(zerosLike)(THTensor *r_, THTensor *input) { THTensor_(resizeAs)(r_, input); THTensor_(zero)(r_); } void THTensor_(onesLike)(THTensor *r_, THTensor *input) { THTensor_(resizeAs)(r_, input); THTensor_(fill)(r_, 1); } void THTensor_(ones)(THTensor *r_, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(fill)(r_, 1); } void THTensor_(diag)(THTensor *r_, THTensor *t, int k) { THArgCheck(THTensor_(nDimension)(t) == 1 || THTensor_(nDimension)(t) == 2, 1, "matrix or a vector expected"); if(THTensor_(nDimension)(t) == 1) { real *t_data = THTensor_(data)(t); int64_t t_stride_0 = THTensor_(stride)(t, 0); int64_t t_size = THTensor_(size)(t, 0); int64_t sz = t_size + (k >= 0 ? k : -k); real *r__data; int64_t r__stride_0; int64_t r__stride_1; int64_t i; THTensor_(resize2d)(r_, sz, sz); THTensor_(zero)(r_); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data += (k >= 0 ? k*r__stride_1 : -k*r__stride_0); for(i = 0; i < t_size; i++) r__data[i*(r__stride_0+r__stride_1)] = t_data[i*t_stride_0]; } else { real *t_data = THTensor_(data)(t); int64_t t_stride_0 = THTensor_(stride)(t, 0); int64_t t_stride_1 = THTensor_(stride)(t, 1); int64_t sz; real *r__data; int64_t r__stride_0; int64_t i; if(k >= 0) sz = THMin(THTensor_(size)(t, 0), THTensor_(size)(t, 1)-k); else sz = THMin(THTensor_(size)(t, 0)+k, THTensor_(size)(t, 1)); THTensor_(resize1d)(r_, sz); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_, 0); t_data += (k >= 0 ? 
k*t_stride_1 : -k*t_stride_0); for(i = 0; i < sz; i++) r__data[i*r__stride_0] = t_data[i*(t_stride_0+t_stride_1)]; } } void THTensor_(eye)(THTensor *r_, int64_t n, int64_t m) { real *r__data; int64_t i, sz; THArgCheck(n > 0, 1, "invalid argument"); if(m <= 0) m = n; THTensor_(resize2d)(r_, n, m); THTensor_(zero)(r_); i = 0; r__data = THTensor_(data)(r_); sz = THMin(THTensor_(size)(r_, 0), THTensor_(size)(r_, 1)); for(i = 0; i < sz; i++) r__data[i*(r_->stride[0]+r_->stride[1])] = 1; } void THTensor_(range)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { ptrdiff_t size; real i = 0; THArgCheck(step > 0 || step < 0, 3, "step must be a non-null number"); THArgCheck(((step > 0) && (xmax >= xmin)) || ((step < 0) && (xmax <= xmin)) , 2, "upper bound and larger bound incoherent with step sign"); size = (ptrdiff_t) (((xmax - xmin) / step) + 1); if (THTensor_(nElement)(r_) != size) { THTensor_(resize1d)(r_, size); } TH_TENSOR_APPLY(real, r_, *r__data = xmin + (i++)*step;); } void THTensor_(arange)(THTensor *r_, accreal xmin, accreal xmax, accreal step) { #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) int m = fmod(xmax - xmin, step) == 0; #else int m = (xmax - xmin) % step == 0; #endif if (m) xmax -= step; THTensor_(range)(r_, xmin, xmax, step); } void THTensor_(randperm)(THTensor *r_, THGenerator *_generator, int64_t n) { real *r__data; int64_t r__stride_0; int64_t i; THArgCheck(n > 0, 1, "must be strictly positive"); THTensor_(resize1d)(r_, n); r__data = THTensor_(data)(r_); r__stride_0 = THTensor_(stride)(r_,0); for(i = 0; i < n; i++) r__data[i*r__stride_0] = (real)(i); for(i = 0; i < n-1; i++) { int64_t z = THRandom_random(_generator) % (n-i); real sav = r__data[i*r__stride_0]; r__data[i*r__stride_0] = r__data[(z+i)*r__stride_0]; r__data[(z+i)*r__stride_0] = sav; } } void THTensor_(reshape)(THTensor *r_, THTensor *t, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(copy)(r_, t); } /* I cut and pasted (slightly adapted) the 
quicksort code from Sedgewick's 1978 "Implementing Quicksort Programs" article
http://www.csie.ntu.edu.tw/~b93076/p847-sedgewick.pdf

It is the state of the art existing implementation. The macros
are here to make as close a match as possible to the pseudocode of
Program 2 p.851

Note that other partition schemes exist, and are typically presented
in textbooks, but those are less efficient. See e.g.
http://cs.stackexchange.com/questions/11458/quicksort-partitioning-hoare-vs-lomuto

Julien, November 12th 2013
*/

/* Depth of the explicit subfile stack. Because the larger subfile is always
   pushed and the smaller one is processed first, the live stack depth is
   O(log n), so 300 levels is ample for any int64_t-sized input. */
#define MAX_LEVELS 300
#define M_SMALL 10 /* Limit for small subfiles */

/* arr/idx are strided views: element III lives at offset III*stride. */
#define ARR(III) arr[(III)*stride]
#define IDX(III) idx[(III)*stride]

/* Swap helpers; they rely on locals `swap` (int64_t) and `rswap` (real)
   being declared in the enclosing function. */
#define LONG_SWAP(AAA, BBB) swap = AAA; AAA = BBB; BBB = swap
#define REAL_SWAP(AAA, BBB) rswap = AAA; AAA = BBB; BBB = rswap

/* Swap values only. */
#define ARR_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ));

/* Swap values together with their companion indices. */
#define BOTH_SWAP(III, JJJ) \
  REAL_SWAP(ARR(III), ARR(JJJ)); \
  LONG_SWAP(IDX(III), IDX(JJJ))

/* Sort `elements` entries of the strided array `arr` into ascending order,
 * applying the identical permutation to the parallel index array `idx`.
 *
 * Algorithm (Sedgewick, Program 2): iterative quicksort with median-of-three
 * pivot selection and an explicit beg[]/end[] stack; subfiles of at most
 * M_SMALL elements are left unsorted and finished by a single insertion-sort
 * pass over the whole array at the end.
 */
static void THTensor_(quicksortascend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  /* An input of at most M_SMALL+1 elements goes straight to insertion sort. */
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    /* After these swaps: ARR(R) >= ARR(L) >= ARR(L+1); the pivot sits at L. */
    if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); }

    /* Hoare-style scan: i moves right past elements < piv, j moves left past
       elements > piv; ARR(L+1) and ARR(R) act as sentinels. */
    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) < piv);
      do { j = j-1; } while(ARR(j) > piv);
      if (j < i)
          break;
      BOTH_SWAP(i, j);
    } while(1);
    /* Put the pivot into its final position j. */
    BOTH_SWAP(L, j);
    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  /* Every element is at most M_SMALL slots from its final position, so this
     pass is linear in practice. Indices move together with their values. */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) > ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) < piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}

/* Descending-order twin of THTensor_(quicksortascend): identical structure,
 * with every value comparison reversed. See the ascending version for the
 * detailed commentary; only the direction-specific points are noted here.
 */
static void THTensor_(quicksortdescend)(real *arr, int64_t *idx, int64_t elements, int64_t stride)
{
  int64_t beg[MAX_LEVELS], end[MAX_LEVELS], i, j, L, R, P, swap, pid, stack = 0, sz_right, sz_left;
  real rswap, piv;
  unsigned char done = 0;

  /* beg[0]=0; end[0]=elements; */
  stack = 0;
  L = 0; R = elements-1;
  done = elements-1 <= M_SMALL;

  while(!done) {
    /* Use median of three for pivot choice (reversed comparisons) */
    P=(L+R)>>1;
    BOTH_SWAP(P, L+1);
    if (ARR(L+1) < ARR(R)) { BOTH_SWAP(L+1, R); }
    if (ARR(L) < ARR(R)) { BOTH_SWAP(L, R); }
    if (ARR(L+1) < ARR(L)) { BOTH_SWAP(L+1, L); }

    i = L+1; j = R; piv = ARR(L); pid = IDX(L);
    do {
      do { i = i+1; } while(ARR(i) > piv);
      do { j = j-1; } while(ARR(j) < piv);
      if (j < i)
          break;
      BOTH_SWAP(i, j);
    } while(1);
    BOTH_SWAP(L, j);
    /* Left subfile is (L, j-1) */
    /* Right subfile is (i, R) */
    sz_left = j-L;
    sz_right = R-i+1;
    if (sz_left <= M_SMALL && sz_right <= M_SMALL) {
      /* both subfiles are small */
      /* if stack empty */
      if (stack == 0) {
        done = 1;
      } else {
        stack--;
        L = beg[stack];
        R = end[stack];
      }
    } else if (sz_left <= M_SMALL || sz_right <= M_SMALL) {
      /* exactly one of the subfiles is small */
      /* (L,R) = large subfile */
      if (sz_left > sz_right) {
        /* Implicit: L = L; */
        R = j-1;
      } else {
        L = i;
        /* Implicit: R = R; */
      }
    } else {
      /* none of the subfiles is small */
      /* push large subfile */
      /* (L,R) = small subfile */
      if (sz_left > sz_right) {
        beg[stack] = L;
        end[stack] = j-1;
        stack++;
        L = i;
        /* Implicit: R = R */
      } else {
        beg[stack] = i;
        end[stack] = R;
        stack++;
        /* Implicit: L = L; */
        R = j-1;
      }
    }
  } /* while not done */

  /* Now insertion sort on the concatenation of subfiles */
  for(i=elements-2; i>=0; i--) {
    if (ARR(i) < ARR(i+1)) {
      piv = ARR(i);
      pid = IDX(i);
      j = i+1;
      do {
        ARR(j-1) = ARR(j);
        IDX(j-1) = IDX(j);
        j = j+1;
      } while(j < elements && ARR(j) > piv);
      ARR(j-1) = piv;
      IDX(j-1) = pid;
    }
  }
}

/* Sort tensor `t` along `dimension`, writing sorted values to rt_ and the
 * originating positions (0-based along that dimension) to ri_.
 * descendingOrder selects quicksortdescend vs quicksortascend.
 *
 * rt_ is resized to t's shape and receives a copy of t; ri_ is resized to the
 * same shape. For each 1-d slice along `dimension`, ri_ is seeded with the
 * identity permutation 0..n-1 and then permuted together with the values, so
 * ri_[k] is the index the k-th sorted value came from.
 * TH_TENSOR_DIM_APPLY2 / THArgCheck are project macros (iteration over all
 * slices along a dimension, and argument validation respectively).
 */
void THTensor_(sort)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int dimension, int descendingOrder)
{
  THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d",
      dimension + TH_INDEX_BASE);

  THTensor_(resizeAs)(rt_, t);
  THTensor_(copy)(rt_, t);

  {
    THLongStorage *size = THTensor_(newSizeOf)(t);
    THLongTensor_resize(ri_, size, NULL);
    THLongStorage_free(size);
  }

  if(descendingOrder)
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortdescend)(rt__data, ri__data, rt__size, rt__stride);)
  }
  else
  {
    TH_TENSOR_DIM_APPLY2(real, rt_, int64_t, ri_, dimension,
                         int64_t i;
                         for(i = 0; i < ri__size; i++)
                           ri__data[i*ri__stride] = i;
                         THTensor_(quicksortascend)(rt__data, ri__data, rt__size, rt__stride);)
  }
}

/* Implementation of the Quickselect algorithm, based on Nicolas Devillard's
public domain implementation at http://ndevilla.free.fr/median/median/
Adapted similarly to the above Quicksort algorithm.
This version does not produce indices along with values.
*/ static void THTensor_(quickselectnoidx)(real *arr, int64_t k, int64_t elements, int64_t stride) { int64_t P, L, R, i, j, swap; real rswap, piv; L = 0; R = elements-1; do { if (R <= L) /* One element only */ return; if (R == L+1) { /* Two elements only */ if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); } return; } /* Use median of three for pivot choice */ P=(L+R)>>1; ARR_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { ARR_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { ARR_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { ARR_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); do { do i++; while(ARR(i) < piv); do j--; while(ARR(j) > piv); if (j < i) break; ARR_SWAP(i, j); } while(1); ARR_SWAP(L, j); /* Re-set active partition */ if (j <= k) L=i; if (j >= k) R=j-1; } while(1); } /* Implementation of the Quickselect algorithm, based on Nicolas Devillard's public domain implementation at http://ndevilla.free.fr/median/median/ Adapted similarly to the above Quicksort algorithm. */ static void THTensor_(quickselect)(real *arr, int64_t *idx, int64_t k, int64_t elements, int64_t stride) { int64_t P, L, R, i, j, swap, pid; real rswap, piv; L = 0; R = elements-1; do { if (R <= L) /* One element only */ return; if (R == L+1) { /* Two elements only */ if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } return; } /* Use median of three for pivot choice */ P=(L+R)>>1; BOTH_SWAP(P, L+1); if (ARR(L+1) > ARR(R)) { BOTH_SWAP(L+1, R); } if (ARR(L) > ARR(R)) { BOTH_SWAP(L, R); } if (ARR(L+1) > ARR(L)) { BOTH_SWAP(L+1, L); } i = L+1; j = R; piv = ARR(L); pid = IDX(L); do { do i++; while(ARR(i) < piv); do j--; while(ARR(j) > piv); if (j < i) break; BOTH_SWAP(i, j); } while(1); BOTH_SWAP(L, j); /* Re-set active partition */ if (j <= k) L=i; if (j >= k) R=j-1; } while(1); } #undef ARR #undef IDX #undef LONG_SWAP #undef REAL_SWAP #undef BOTH_SWAP void THTensor_(mode)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; int64_t 
*tempi__data; int64_t t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; real mode = 0; int64_t modei = 0; int64_t temp_freq = 0; int64_t max_freq = 0; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quicksortascend)(temp__data, tempi__data, t_size_dim, 1); for(i = 0; i < t_size_dim; i++) { temp_freq++; if ((i == t_size_dim - 1) || (temp__data[i] != temp__data[i+1])) { if (temp_freq > max_freq) { mode = temp__data[i]; modei = tempi__data[i]; max_freq = temp_freq; } temp_freq = 0; } } *values__data = mode; *indices__data = modei;); THTensor_(free)(temp_); THLongTensor_free(tempi_); if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(kthvalue)(THTensor *values_, THLongTensor *indices_, THTensor *t, int64_t k, int dimension, int keepdim) { THLongStorage *dim; THTensor *temp_; THLongTensor *tempi_; real *temp__data; int64_t *tempi__data; int64_t t_size_dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); THArgCheck(k > 0 && k <= t->size[dimension], 2, "selected index out of range"); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(values_, dim, NULL); THLongTensor_resize(indices_, dim, NULL); 
THLongStorage_free(dim); t_size_dim = THTensor_(size)(t, dimension); temp_ = THTensor_(new)(); THTensor_(resize1d)(temp_, t_size_dim); temp__data = THTensor_(data)(temp_); tempi_ = THLongTensor_new(); THLongTensor_resize1d(tempi_, t_size_dim); tempi__data = THLongTensor_data(tempi_); TH_TENSOR_DIM_APPLY3(real, t, real, values_, int64_t, indices_, dimension, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < t_size_dim; i++) temp__data[i] = t_data[i*t_stride]; for(i = 0; i < t_size_dim; i++) tempi__data[i] = i; THTensor_(quickselect)(temp__data, tempi__data, k - 1, t_size_dim, 1); *values__data = temp__data[k-1]; *indices__data = tempi__data[k-1];); THTensor_(free)(temp_); THLongTensor_free(tempi_); if (!keepdim) { THTensor_(squeeze1d)(values_, values_, dimension); THLongTensor_squeeze1d(indices_, indices_, dimension); } } void THTensor_(median)(THTensor *values_, THLongTensor *indices_, THTensor *t, int dimension, int keepdim) { int64_t t_size_dim, k; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "dimension out of range"); t_size_dim = THTensor_(size)(t, dimension); k = (t_size_dim-1) >> 1; /* take middle or one-before-middle element */ THTensor_(kthvalue)(values_, indices_, t, k+1, dimension, keepdim); } void THTensor_(topk)(THTensor *rt_, THLongTensor *ri_, THTensor *t, int64_t k, int dim, int dir, int sorted) { int numDims = THTensor_(nDimension)(t); THArgCheck(dim >= 0 && dim < numDims, 3, "dim not in range"); int64_t sliceSize = THTensor_(size)(t, dim); THArgCheck(k > 0 && k <= sliceSize, 2, "k not in range for dimension"); THTensor *tmpResults = THTensor_(new)(); THTensor_(resize1d)(tmpResults, sliceSize); real *tmp__data = THTensor_(data)(tmpResults); THLongTensor *tmpIndices = THLongTensor_new(); THLongTensor_resize1d(tmpIndices, sliceSize); int64_t *tmpi__data = THLongTensor_data(tmpIndices); THLongStorage *topKSize = THTensor_(newSizeOf)(t); THLongStorage_set(topKSize, dim, k); THTensor_(resize)(rt_, topKSize, 
NULL); THLongTensor_resize(ri_, topKSize, NULL); THLongStorage_free(topKSize); if (dir) { /* k largest elements, descending order (optional: see sorted) */ int64_t K = sliceSize - k; TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } if (K > 0) THTensor_(quickselect)(tmp__data, tmpi__data, K - 1, sliceSize, 1); if (sorted) THTensor_(quicksortdescend)(tmp__data + K, tmpi__data + K, k, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i + K]; ri__data[i*ri__stride] = tmpi__data[i + K]; }) } else { /* k smallest elements, ascending order (optional: see sorted) */ TH_TENSOR_DIM_APPLY3(real, t, real, rt_, int64_t, ri_, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM, int64_t i; for(i = 0; i < sliceSize; i++) { tmp__data[i] = t_data[i*t_stride]; tmpi__data[i] = i; } THTensor_(quickselect)(tmp__data, tmpi__data, k - 1, sliceSize, 1); if (sorted) THTensor_(quicksortascend)(tmp__data, tmpi__data, k - 1, 1); for(i = 0; i < k; i++) { rt__data[i*rt__stride] = tmp__data[i]; ri__data[i*ri__stride] = tmpi__data[i]; }) } THTensor_(free)(tmpResults); THLongTensor_free(tmpIndices); } void THTensor_(tril)(THTensor *r_, THTensor *t, int64_t k) { int64_t t_size_0, t_size_1; int64_t t_stride_0, t_stride_1; int64_t r__stride_0, r__stride_1; real *t_data, *r__data; int64_t r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { int64_t sz = THMin(r+k+1, t_size_1); for(c = THMax(0, r+k+1); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; for(c = 0; c < sz; c++) 
r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; } } void THTensor_(triu)(THTensor *r_, THTensor *t, int64_t k) { int64_t t_size_0, t_size_1; int64_t t_stride_0, t_stride_1; int64_t r__stride_0, r__stride_1; real *t_data, *r__data; int64_t r, c; THArgCheck(THTensor_(nDimension)(t) == 2, 1, "expected a matrix"); THTensor_(resizeAs)(r_, t); t_size_0 = THTensor_(size)(t, 0); t_size_1 = THTensor_(size)(t, 1); t_stride_0 = THTensor_(stride)(t, 0); t_stride_1 = THTensor_(stride)(t, 1); r__stride_0 = THTensor_(stride)(r_, 0); r__stride_1 = THTensor_(stride)(r_, 1); r__data = THTensor_(data)(r_); t_data = THTensor_(data)(t); for(r = 0; r < t_size_0; r++) { int64_t sz = THMin(r+k, t_size_1); for(c = THMax(0, r+k); c < t_size_1; c++) r__data[r*r__stride_0+c*r__stride_1] = t_data[r*t_stride_0+c*t_stride_1]; for(c = 0; c < sz; c++) r__data[r*r__stride_0+c*r__stride_1] = 0; } } void THTensor_(cat)(THTensor *r_, THTensor *ta, THTensor *tb, int dimension) { THTensor* inputs[2]; inputs[0] = ta; inputs[1] = tb; THTensor_(catArray)(r_, inputs, 2, dimension); } void THTensor_(catArray)(THTensor *result, THTensor **inputs, int numInputs, int dimension) { THLongStorage *size; int i, j; int64_t offset; int maxDim = dimension + 1; int allEmpty = 1; int allContiguous = 1; // cat_dimension is the actual dimension we cat along int cat_dimension = dimension; for (i = 0; i < numInputs; i++) { maxDim = THMax(maxDim, inputs[i]->nDimension); } // When the user input dimension is -1 (i.e. -2 in C) // Then we pick the maximum last dimension across all tensors. 
if ( dimension + TH_INDEX_BASE == -1 ) { cat_dimension = maxDim?(maxDim-1):0; } THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs); THArgCheck(cat_dimension >= 0, 4, "invalid dimension %d", dimension + TH_INDEX_BASE); size = THLongStorage_newWithSize(maxDim); for(i = 0; i < maxDim; i++) { // dimSize is either the size of the dim if it exists, either 1 if #dim > 0, otherwise 0 int64_t dimSize = i < inputs[0]->nDimension ? inputs[0]->size[i] : THMin(inputs[0]->nDimension, 1); if (i == cat_dimension) { for (j = 1; j < numInputs; j++) { // accumulate the size over the dimension we want to cat on. // Empty tensors are allowed dimSize += i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1); } } else { for (j = 1; j < numInputs; j++) { int64_t sz = (i < inputs[j]->nDimension ? inputs[j]->size[i] : THMin(inputs[j]->nDimension, 1)); // If it's a dimension we're not catting on // Then fail if sizes are different AND > 0 if (dimSize != sz && dimSize && sz) { THLongStorage_free(size); THError("inconsistent tensor sizes"); } else if(!dimSize) { dimSize = sz; } } } allEmpty = allEmpty && !dimSize; size->data[i] = dimSize; } // Initiate catting and resizing // If at least one of the input is not empty if (!allEmpty) { THTensor_(resize)(result, size, NULL); // Check contiguity of all inputs and result for (i = 0; i < numInputs; i++) { if(inputs[i]->nDimension) { allContiguous = allContiguous && THTensor_(isContiguous)(inputs[i]); } } allContiguous = allContiguous && THTensor_(isContiguous)(result); // First path is for contiguous inputs along dim 1 // Second path for non-contiguous if (cat_dimension == 0 && allContiguous) { real* result_data = result->storage->data + result->storageOffset; offset = 0; for (j = 0; j < numInputs; j++) { if (inputs[j]->nDimension) { THTensor* input0 = inputs[j]; real* input0_data = input0->storage->data + input0->storageOffset; int64_t input0_size = THTensor_(nElement)(input0); memcpy(result_data + 
offset, input0_data, input0_size*sizeof(real)); offset += input0_size; } } } else { offset = 0; for (j = 0; j < numInputs; j++) { if (inputs[j]->nDimension) { int64_t dimSize = cat_dimension < inputs[j]->nDimension ? inputs[j]->size[cat_dimension] : 1; THTensor *nt = THTensor_(newWithTensor)(result); THTensor_(narrow)(nt, NULL, cat_dimension, offset, dimSize); THTensor_(copy)(nt, inputs[j]); THTensor_(free)(nt); offset += dimSize; } } } } THLongStorage_free(size); } int THTensor_(equal)(THTensor *ta, THTensor* tb) { int equal = 1; if(!THTensor_(isSameSizeAs)(ta, tb)) return 0; if (THTensor_(isContiguous)(ta) && THTensor_(isContiguous)(tb)) { real *tap = THTensor_(data)(ta); real *tbp = THTensor_(data)(tb); ptrdiff_t sz = THTensor_(nElement)(ta); ptrdiff_t i; for (i=0; i<sz; ++i){ if(tap[i] != tbp[i]) return 0; } } else { // Short-circuit the apply function on inequality TH_TENSOR_APPLY2(real, ta, real, tb, if (equal && *ta_data != *tb_data) { equal = 0; TH_TENSOR_APPLY_hasFinished = 1; break; }) } return equal; } #define TENSOR_IMPLEMENT_LOGICAL(NAME,OP) \ void THTensor_(NAME##Value)(THByteTensor *r_, THTensor* t, real value) \ { \ THByteTensor_resizeNd(r_, t->nDimension, t->size, NULL); \ TH_TENSOR_APPLY2(unsigned char, r_, real, t, \ *r__data = (*t_data OP value) ? 1 : 0;); \ } \ void THTensor_(NAME##ValueT)(THTensor* r_, THTensor* t, real value) \ { \ THTensor_(resizeNd)(r_, t->nDimension, t->size, NULL); \ TH_TENSOR_APPLY2(real, r_, real, t, \ *r__data = (*t_data OP value) ? 1 : 0;); \ } \ void THTensor_(NAME##Tensor)(THByteTensor *r_, THTensor *ta, THTensor *tb) \ { \ THByteTensor_resizeNd(r_, ta->nDimension, ta->size, NULL); \ TH_TENSOR_APPLY3(unsigned char, r_, real, ta, real, tb, \ *r__data = (*ta_data OP *tb_data) ? 1 : 0;); \ } \ void THTensor_(NAME##TensorT)(THTensor *r_, THTensor *ta, THTensor *tb) \ { \ THTensor_(resizeNd)(r_, ta->nDimension, ta->size, NULL); \ TH_TENSOR_APPLY3(real, r_, real, ta, real, tb, \ *r__data = (*ta_data OP *tb_data) ? 
1 : 0;); \ } \ TENSOR_IMPLEMENT_LOGICAL(lt,<) TENSOR_IMPLEMENT_LOGICAL(gt,>) TENSOR_IMPLEMENT_LOGICAL(le,<=) TENSOR_IMPLEMENT_LOGICAL(ge,>=) TENSOR_IMPLEMENT_LOGICAL(eq,==) TENSOR_IMPLEMENT_LOGICAL(ne,!=) #define LAB_IMPLEMENT_BASIC_FUNCTION(NAME, CFUNC) \ void THTensor_(NAME)(THTensor *r_, THTensor *t) \ { \ THTensor_(resizeAs)(r_, t); \ if (THTensor_(isContiguous)(r_) && THTensor_(isContiguous)(t)) { \ TH_TENSOR_APPLY2_CONTIG(real, r_, real, t, THVector_(NAME)(r__data, t_data, r__len);); \ } else { \ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = CFUNC(*t_data);); \ } \ } #if defined(TH_REAL_IS_LONG) LAB_IMPLEMENT_BASIC_FUNCTION(abs,labs) LAB_IMPLEMENT_BASIC_FUNCTION(neg,-) #endif /* int64_t only part */ #if defined(TH_REAL_IS_SHORT) || defined(TH_REAL_IS_INT) LAB_IMPLEMENT_BASIC_FUNCTION(abs,abs) LAB_IMPLEMENT_BASIC_FUNCTION(neg,-) #endif /* int only part */ #if defined(TH_REAL_IS_BYTE) #define TENSOR_IMPLEMENT_LOGICAL_SUM(NAME, OP, INIT_VALUE) \ int THTensor_(NAME)(THTensor *tensor) \ { \ THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); \ int sum = INIT_VALUE; \ TH_TENSOR_APPLY(real, tensor, sum = sum OP *tensor_data;); \ return sum; \ } TENSOR_IMPLEMENT_LOGICAL_SUM(logicalall, &&, 1) TENSOR_IMPLEMENT_LOGICAL_SUM(logicalany, ||, 0) #endif /* Byte only part */ /* floating point only now */ #if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE) #if defined (TH_REAL_IS_FLOAT) #define TH_MATH_NAME(fn) fn##f #else #define TH_MATH_NAME(fn) fn #endif LAB_IMPLEMENT_BASIC_FUNCTION(log,TH_MATH_NAME(log)) LAB_IMPLEMENT_BASIC_FUNCTION(lgamma,TH_MATH_NAME(lgamma)) LAB_IMPLEMENT_BASIC_FUNCTION(log1p,TH_MATH_NAME(log1p)) LAB_IMPLEMENT_BASIC_FUNCTION(sigmoid,TH_MATH_NAME(TH_sigmoid)) LAB_IMPLEMENT_BASIC_FUNCTION(exp,TH_MATH_NAME(exp)) LAB_IMPLEMENT_BASIC_FUNCTION(cos,TH_MATH_NAME(cos)) LAB_IMPLEMENT_BASIC_FUNCTION(acos,TH_MATH_NAME(acos)) LAB_IMPLEMENT_BASIC_FUNCTION(cosh,TH_MATH_NAME(cosh)) LAB_IMPLEMENT_BASIC_FUNCTION(sin,TH_MATH_NAME(sin)) 
LAB_IMPLEMENT_BASIC_FUNCTION(asin,TH_MATH_NAME(asin)) LAB_IMPLEMENT_BASIC_FUNCTION(sinh,TH_MATH_NAME(sinh)) LAB_IMPLEMENT_BASIC_FUNCTION(tan,TH_MATH_NAME(tan)) LAB_IMPLEMENT_BASIC_FUNCTION(atan,TH_MATH_NAME(atan)) LAB_IMPLEMENT_BASIC_FUNCTION(tanh,TH_MATH_NAME(tanh)) LAB_IMPLEMENT_BASIC_FUNCTION(erf,TH_MATH_NAME(erf)) LAB_IMPLEMENT_BASIC_FUNCTION(erfinv,TH_erfinv) LAB_IMPLEMENT_BASIC_FUNCTION(sqrt,TH_MATH_NAME(sqrt)) LAB_IMPLEMENT_BASIC_FUNCTION(rsqrt,TH_MATH_NAME(TH_rsqrt)) LAB_IMPLEMENT_BASIC_FUNCTION(ceil,TH_MATH_NAME(ceil)) LAB_IMPLEMENT_BASIC_FUNCTION(floor,TH_MATH_NAME(floor)) LAB_IMPLEMENT_BASIC_FUNCTION(round,TH_MATH_NAME(round)) LAB_IMPLEMENT_BASIC_FUNCTION(abs,TH_MATH_NAME(fabs)) LAB_IMPLEMENT_BASIC_FUNCTION(trunc,TH_MATH_NAME(trunc)) LAB_IMPLEMENT_BASIC_FUNCTION(frac,TH_MATH_NAME(TH_frac)) LAB_IMPLEMENT_BASIC_FUNCTION(neg,-) LAB_IMPLEMENT_BASIC_FUNCTION(cinv, TH_MATH_NAME(1.0) / ) void THTensor_(pow)(THTensor *r_, THTensor *t, real value) { THTensor_(resizeAs)(r_, t); if(value == 1){ THTensor_(copy)(r_, t); } else if(value == 2){ THTensor_(cmul)(r_, t, t); } else if(value == 3){ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = *t_data * *t_data * *t_data;); } else if(value == 0.5){ THTensor_(sqrt)(r_, t); } else if(value == -0.5){ THTensor_(rsqrt)(r_, t); } else if(value == -1){ THTensor_(cinv)(r_, t); } else if(value == -2){ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(1.0) / (*t_data * *t_data);); } else{ TH_TENSOR_APPLY2(real, r_, real, t, *r__data = TH_MATH_NAME(pow)(*t_data, value);); } } void THTensor_(atan2)(THTensor *r_, THTensor *tx, THTensor *ty) { THTensor_(resizeAs)(r_, tx); TH_TENSOR_APPLY3(real, r_, real, tx, real, ty, *r__data = TH_MATH_NAME(atan2)(*tx_data,*ty_data);); } void THTensor_(lerp)(THTensor *r_, THTensor *a, THTensor *b, real weight) { THArgCheck(THTensor_(nElement)(a) == THTensor_(nElement)(b), 2, "sizes do not match"); THTensor_(resizeAs)(r_, a); TH_TENSOR_APPLY3(real, r_, real, a, real, b, *r__data = 
TH_MATH_NAME(TH_lerp)(*a_data, *b_data, weight);); } void THTensor_(mean)(THTensor *r_, THTensor *t, int dimension, int keepdim) { THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); THTensor_(sum)(r_, t, dimension, keepdim); THTensor_(div)(r_, r_, t->size[dimension]); } void THTensor_(std)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; int64_t i; for (i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; real delta = z - mean; mean += delta / (i + 1); real delta2 = z - mean; M2 += delta * delta2; } if (biased && t_size >= 2) { *r__data = TH_MATH_NAME(sqrt)(M2 / t_size); } else if (!biased && t_size >= 2) { *r__data = TH_MATH_NAME(sqrt)(M2 / (t_size - 1)); } else if (biased && t_size == 1) { *r__data = 0; } else { *r__data = NAN; }); if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } void THTensor_(var)(THTensor *r_, THTensor *t, int dimension, int biased, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, // Uses Welford's algorithm for numeric stability accreal mean = 0; accreal M2 = 0; int64_t i; for (i = 0; i < t_size; i++) { real z = t_data[i*t_stride]; real delta = z - mean; mean += delta / (i + 1); real delta2 = z - mean; M2 += delta * delta2; } if (biased && t_size >= 2) { 
*r__data = M2 / t_size; } else if (!biased && t_size >= 2) { *r__data = M2 / (t_size - 1); } else if (biased && t_size == 1) { *r__data = 0; } else { *r__data = NAN; }); if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } void THTensor_(norm)(THTensor *r_, THTensor *t, real value, int dimension, int keepdim) { THLongStorage *dim; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(t), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); dim = THTensor_(newSizeOf)(t); THLongStorage_set(dim, dimension, 1); THTensor_(resize)(r_, dim, NULL); THLongStorage_free(dim); if(value == 0) { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; int64_t i; for(i = 0; i < t_size; i++) sum += t_data[i*t_stride] != 0.0; *r__data = sum;) } else { TH_TENSOR_DIM_APPLY2(real, t, real, r_, dimension, accreal sum = 0; int64_t i; for(i = 0; i < t_size; i++) { sum += TH_MATH_NAME(pow)( TH_MATH_NAME(fabs)(t_data[i*t_stride]), value); } *r__data = TH_MATH_NAME(pow)(sum, 1.0/value);) } if (!keepdim) { THTensor_(squeeze1d)(r_, r_, dimension); } } accreal THTensor_(normall)(THTensor *tensor, real value) { accreal sum = 0; if(value == 0) { TH_TENSOR_APPLY(real, tensor, sum += *tensor_data != 0.0;); return sum; } else if(value == 1) { TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(fabs)(*tensor_data);); return sum; } else if(value == 2) { TH_TENSOR_APPLY(real, tensor, accreal z = *tensor_data; sum += z*z;); return sqrt(sum); } else { TH_TENSOR_APPLY(real, tensor, sum += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*tensor_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); } } void THTensor_(renorm)(THTensor *res, THTensor *src, real value, int dimension, real maxnorm) { int i; THTensor *rowR, *rowS; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(src), 3, "invalid dimension %d", dimension + TH_INDEX_BASE); THArgCheck(value > 0, 2, "non-positive-norm not supported"); THArgCheck(THTensor_(nDimension)(src) > 1, 1, "need at least 2 dimensions, got 
%d dimensions", THTensor_(nDimension)(src)); rowR = THTensor_(new)(); rowS = THTensor_(new)(); THTensor_(resizeAs)(res, src); for (i=0; i<src->size[dimension]; i++) { real norm = 0; real new_norm; THTensor_(select)(rowS, src, dimension, i); THTensor_(select)(rowR, res, dimension, i); if (value == 1) { TH_TENSOR_APPLY(real, rowS, norm += fabs(*rowS_data);); } else if (value == 2) { TH_TENSOR_APPLY(real, rowS, accreal z = *rowS_data; norm += z*z;); } else { TH_TENSOR_APPLY(real, rowS, norm += TH_MATH_NAME(pow)(TH_MATH_NAME(fabs)(*rowS_data), value);); } norm = pow(norm, 1/value); if (norm > maxnorm) { new_norm = maxnorm / (norm + 1e-7); TH_TENSOR_APPLY2( real, rowR, real, rowS, *rowR_data = (*rowS_data) * new_norm; ) } else THTensor_(copy)(rowR, rowS); } THTensor_(free)(rowR); THTensor_(free)(rowS); } accreal THTensor_(dist)(THTensor *tensor, THTensor *src, real value) { real sum = 0; TH_TENSOR_APPLY2(real, tensor, real, src, sum += TH_MATH_NAME(pow)( TH_MATH_NAME(fabs)(*tensor_data - *src_data), value);); return TH_MATH_NAME(pow)(sum, 1.0/value); } accreal THTensor_(meanall)(THTensor *tensor) { THArgCheck(tensor->nDimension > 0, 1, "empty Tensor"); return THTensor_(sumall)(tensor)/THTensor_(nElement)(tensor); } accreal THTensor_(varall)(THTensor *tensor, int biased) { accreal mean = THTensor_(meanall)(tensor); accreal sum = 0; TH_TENSOR_APPLY(real, tensor, sum += (*tensor_data - mean)*(*tensor_data - mean);); sum /= THTensor_(nElement)(tensor) - (biased ? 
0 : 1); return sum; } accreal THTensor_(stdall)(THTensor *tensor, int biased) { return sqrt(THTensor_(varall)(tensor, biased)); } void THTensor_(linspace)(THTensor *r_, real a, real b, int64_t n) { real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THTensor_(nElement)(r_) != n) { THTensor_(resize1d)(r_, n); } if(n == 1) { THTensor_(set1d)(r_, 0, a); } else { TH_TENSOR_APPLY(real, r_, *r__data = a + i*(b-a)/((real)(n-1)); i++; ); } } void THTensor_(logspace)(THTensor *r_, real a, real b, int64_t n) { real i = 0; THArgCheck(n > 1 || (n == 1 && (a == b)), 3, "invalid number of points"); if (THTensor_(nElement)(r_) != n) { THTensor_(resize1d)(r_, n); } if(n == 1) { THTensor_(set1d)(r_, 0, TH_MATH_NAME(pow)(10.0, a)); } else { TH_TENSOR_APPLY(real, r_, *r__data = TH_MATH_NAME(pow)(10.0, a + i*(b-a)/((real)(n-1))); i++; ); } } void THTensor_(rand)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(uniform)(r_, _generator, 0, 1); } void THTensor_(randn)(THTensor *r_, THGenerator *_generator, THLongStorage *size) { THTensor_(resize)(r_, size, NULL); THTensor_(normal)(r_, _generator, 0, 1); } void THTensor_(histc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) { real minval; real maxval; real *h_data; THTensor_(resize1d)(hist, nbins); THTensor_(zero)(hist); minval = minvalue; maxval = maxvalue; if (minval == maxval) { minval = THTensor_(minall)(tensor); maxval = THTensor_(maxall)(tensor); } if (minval == maxval) { minval = minval - 1; maxval = maxval + 1; } h_data = THTensor_(data)(hist); TH_TENSOR_APPLY(real, tensor, if (*tensor_data >= minval && *tensor_data <= maxval) { const int bin = (int)((*tensor_data-minval) / (maxval-minval) * nbins); h_data[THMin(bin, nbins-1)] += 1; } ); } void THTensor_(bhistc)(THTensor *hist, THTensor *tensor, int64_t nbins, real minvalue, real maxvalue) { THArgCheck(THTensor_(nDimension)(tensor) < 3, 2, "invalid 
dimension %d, the input must be a 2d tensor", THTensor_(nDimension)(tensor)); int dimension = 1; THArgCheck(dimension >= 0 && dimension < THTensor_(nDimension)(tensor), 2, "invalid dimension %d", dimension + TH_INDEX_BASE); real minval; real maxval; real *h_data; THTensor_(resize2d)(hist, tensor->size[0], nbins); THTensor_(zero)(hist); minval = minvalue; maxval = maxvalue; if (minval == maxval) { minval = THTensor_(minall)(tensor); maxval = THTensor_(maxall)(tensor); } if (minval == maxval) { minval = minval - 1; maxval = maxval + 1; } TH_TENSOR_DIM_APPLY2(real, tensor, real, hist, dimension, int64_t i; for(i = 0; i < tensor_size; i++) { if(tensor_data[i*tensor_stride] >= minval && tensor_data[i*tensor_stride] <= maxval) { const int bin = (int)((tensor_data[i*tensor_stride]-minval) / (maxval-minval) * nbins); hist_data[THMin(bin, nbins-1)] += 1; } } ); } #undef TH_MATH_NAME #endif /* floating point only part */ #undef IS_NONZERO #endif
declare_reduction_codegen.c
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix=CHECK-LOAD %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes -fopenmp-version=45 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes -fopenmp-version=45 | FileCheck --check-prefixes=CHECK-LOAD,OMP45-LOAD %s // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c -emit-llvm %s -triple %itanium_abi_triple -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple %itanium_abi_triple -emit-pch -o %t %s -femit-all-decls -disable-llvm-passes // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c -triple %itanium_abi_triple -include-pch %t -verify %s -emit-llvm -o - -femit-all-decls -disable-llvm-passes | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: [[SSS_INT:.+]] = type { i32 } // CHECK-LOAD: [[SSS_INT:.+]] = type { i32 } // CHECK-DAG: [[SSS_INIT:@.+]] = private constant %struct.SSS zeroinitializer // CHECK-DAG: [[INT_INIT:@.+]] = private constant i32 0 #pragma omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // 
CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1) // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: store i32 [[MUL]], i32* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1) // CHECK-LOAD: sext i8 // CHECK-LOAD: sext i8 // CHECK-LOAD: [[MUL:%.+]] = mul nsw i32 // CHECK-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-LOAD-NEXT: store i8 [[TRUNC]], i8* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } #pragma omp declare reduction(fun : float : omp_out += omp_in) initializer(omp_priv = 15 + omp_orig) // CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1) // CHECK: [[ADD:%.+]] = fadd float // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1) // CHECK: [[ADD:%.+]] = fadd float 1.5 // CHECK-NEXT: store float [[ADD]], float* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1) // CHECK-LOAD: [[ADD:%.+]] = fadd float // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(float* noalias noundef %0, float* noalias noundef %1) // CHECK-LOAD: [[ADD:%.+]] = fadd float 1.5 // CHECK-LOAD-NEXT: store float [[ADD]], float* // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } struct SSS { int field; #pragma 
omp declare reduction(+ : int, char : omp_out *= omp_in) // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1) // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: store i32 [[MUL]], i32* // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1) // CHECK: sext i8 // CHECK: sext i8 // CHECK: [[MUL:%.+]] = mul nsw i32 // CHECK-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // CHECK-NEXT: store i8 [[TRUNC]], i8* // CHECK-NEXT: ret void // CHECK-NEXT: } }; void init(struct SSS *priv, struct SSS orig); #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LABEL: @main // CHECK-LOAD-LABEL: @main int main(void) { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call 
void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } { #pragma omp declare reduction(fun : struct SSS : omp_out = omp_in) initializer(init(&omp_priv, omp_orig)) // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call void @llvm.memcpy // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK: call void @init( // CHECK-NEXT: ret void // CHECK-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @llvm.memcpy // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}([[SSS_INT]]* noalias noundef %0, [[SSS_INT]]* noalias noundef %1) // CHECK-LOAD: call void @init( // CHECK-LOAD-NEXT: ret void // CHECK-LOAD-NEXT: } } return 0; } // OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias noundef %0, i32* noalias noundef %1) // OMP45-LOAD: [[MUL:%.+]] = mul nsw i32 // OMP45-LOAD-NEXT: store i32 [[MUL]], i32* // OMP45-LOAD-NEXT: ret void // OMP45-LOAD-NEXT: } // OMP45-LOAD: define internal {{.*}}void @{{[^(]+}}(i8* noalias noundef %0, i8* noalias noundef %1) // OMP45-LOAD: sext i8 // OMP45-LOAD: sext i8 // OMP45-LOAD: [[MUL:%.+]] = mul nsw i32 // OMP45-LOAD-NEXT: [[TRUNC:%.+]] = trunc i32 [[MUL]] to i8 // OMP45-LOAD-NEXT: store i8 [[TRUNC]], i8* // OMP45-LOAD-NEXT: ret void // OMP45-LOAD-NEXT: } // CHECK-LABEL: bar struct SSS ss; int in; void 
bar(void) { // CHECK: [[SS_PRIV:%.+]] = alloca %struct.SSS, // CHECK: [[IN_PRIV:%.+]] = alloca i32, // CHECK: [[BC:%.+]] = bitcast %struct.SSS* [[SS_PRIV]] to i8* // CHECK: call void @llvm.memcpy.p0i8.p0i8.i{{64|32}}(i8* {{.*}}[[BC]], i8* {{.*}}bitcast (%struct.SSS* [[SSS_INIT]] to i8*), i{{64|32}} 4, i1 false) // CHECK: [[IN_VAL:%.+]] = load i32, i32* [[INT_INIT]], // CHECK: store i32 [[IN_VAL]], i32* [[IN_PRIV]], // CHECK: call void @__kmpc_for_static_init_4( #pragma omp declare reduction(+ \ : struct SSS \ : omp_out = omp_in) #pragma omp declare reduction(+ \ : int \ : omp_out = omp_in) #pragma omp for reduction(+ \ : ss, in) for (int i = 0; i < 10; ++i) ; } #endif
// ===== Sema.h (start of next concatenated file) =====
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" 
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>

// Forward declarations of LLVM support types referenced by pointer/reference
// only, so we avoid pulling in their headers here.
namespace llvm {
  class APSInt;
  template <typename ValueT> struct DenseMapInfo;
  template <typename ValueT, typename ValueInfoT> class DenseSet;
  class SmallBitVector;
  struct InlineAsmIdentifierInfo;
}

namespace clang {

// Forward declarations of Clang AST, Sema, and code-completion types used in
// the Sema interface below. Kept as forward declarations to minimize header
// dependencies.
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;

// Scope-tracking and delayed-diagnostic helpers live in the sema namespace.
namespace sema {
  class AccessedEntity;
  class BlockScopeInfo;
  class Capture;
  class CapturedRegionScopeInfo;
  class CapturingScopeInfo;
  class CompoundScopeInfo;
  class DelayedDiagnostic;
  class DelayedDiagnosticPool;
  class FunctionScopeInfo;
  class LambdaScopeInfo;
  class PossiblyUnreachableDiag;
  class SemaPPCallbacks;
  class TemplateDeductionInfo;
}

namespace threadSafety {
  class BeforeSet;
  void threadSafetyCleanup(BeforeSet* Cache);
}

// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
                  SourceLocation> UnexpandedParameterPack;

/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  // NOTE: lookups tend to hit the same file repeatedly, so the last-queried
  // entry is kept out of the DenseMap and written back only on a miss.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Look up (creating if needed) the nullability record for \p file,
  /// going through the single-element cache.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. 
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! 
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispAttr::Mode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. 
const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. 
This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. 
TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. 
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. 
DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope {
  Sema &S;
  Sema::ContextRAII SavedContext;
  bool PushedCodeSynthesisContext = false;

public:
  // Enters DC as the current context, pushes a function scope and a
  // potentially-evaluated expression context, and marks a FunctionDecl as
  // about to receive a body. DC must be a FunctionDecl or ObjCMethodDecl.
  SynthesizedFunctionScope(Sema &S, DeclContext *DC)
      : S(S), SavedContext(S, DC) {
    S.PushFunctionScope();
    S.PushExpressionEvaluationContext(
        Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
    if (auto *FD = dyn_cast<FunctionDecl>(DC))
      FD->setWillHaveBody(true);
    else
      assert(isa<ObjCMethodDecl>(DC));
  }

  // Optionally record a code-synthesis context note (pointing at UseLoc) so
  // diagnostics emitted while synthesizing the body mention their origin.
  // May be called at most once per scope.
  void addContextNote(SourceLocation UseLoc) {
    assert(!PushedCodeSynthesisContext);

    Sema::CodeSynthesisContext Ctx;
    Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
    Ctx.PointOfInstantiation = UseLoc;
    Ctx.Entity = cast<Decl>(S.CurContext);
    S.pushCodeSynthesisContext(Ctx);

    PushedCodeSynthesisContext = true;
  }

  // Unwinds everything the constructor (and addContextNote) set up, in
  // reverse order.
  ~SynthesizedFunctionScope() {
    if (PushedCodeSynthesisContext)
      S.popCodeSynthesisContext();
    if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
      FD->setWillHaveBody(false);
    S.PopExpressionEvaluationContext();
    S.PopFunctionScopeInfo();
  }
};

/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before declared. rare. may alias another
/// identifier, declared or undeclared
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared.  Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();

/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;

// Resolver mapping identifiers to their declarations during name lookup
// (see IdentifierResolver.h for the contract).
IdentifierResolver IdResolver;

/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;

/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;

/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;

/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;

/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;

/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;

/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;

/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;

/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;

/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;

/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;

/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;

/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;

/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;

/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;

/// Pointer to NSString type (NSString *).
QualType NSStringPointer;

/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;

/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;

/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;

/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;

/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;

/// id<NSCopying> type.
QualType QIDNSCopying;

/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;

/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;

/// A flag to indicate that we're in a context that permits abstract
/// references to fields.  This is really a
bool AllowAbstractFieldReference;

/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
  /// The current expression and its subexpressions occur within an
  /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
  /// \c sizeof, where the type of the expression may be significant but
  /// no code will be generated to evaluate the value of the expression at
  /// run time.
  Unevaluated,

  /// The current expression occurs within a braced-init-list within
  /// an unevaluated operand. This is mostly like a regular unevaluated
  /// context, except that we still instantiate constexpr functions that are
  /// referenced here so that we can perform narrowing checks correctly.
  UnevaluatedList,

  /// The current expression occurs within a discarded statement.
  /// This behaves largely similarly to an unevaluated operand in preventing
  /// definitions from being required, but not in other ways.
  DiscardedStatement,

  /// The current expression occurs within an unevaluated
  /// operand that unconditionally permits abstract references to
  /// fields, such as a SIZE operator in MS-style inline assembly.
  UnevaluatedAbstract,

  /// The current context is "potentially evaluated" in C++11 terms,
  /// but the expression is evaluated at compile-time (like the values of
  /// cases in a switch statement).
  ConstantEvaluated,

  /// The current expression is potentially evaluated at run time,
  /// which means that code may be generated to evaluate the value of the
  /// expression at run time.
  PotentiallyEvaluated,

  /// The current expression is potentially evaluated, but any
  /// declarations referenced inside that expression are only used if
  /// in fact the current expression is used.
  ///
  /// This value is used when parsing default function arguments, for which
  /// we would like to provide diagnostics (e.g., passing non-POD arguments
  /// through varargs) but do not want to mark declarations as "referenced"
  /// until the default argument is used.
  PotentiallyEvaluatedIfUsed
};

/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
  /// The expression evaluation context.
  ExpressionEvaluationContext Context;

  /// Whether the enclosing context needed a cleanup.
  CleanupInfo ParentCleanup;

  /// Whether we are in a decltype expression.
  bool IsDecltype;

  /// The number of active cleanup objects when we entered
  /// this expression evaluation context.
  unsigned NumCleanupObjects;

  /// The number of typos encountered during this expression evaluation
  /// context (i.e. the number of TypoExprs created).
  unsigned NumTypos;

  // Expressions that were maybe-ODR-used in the enclosing context, saved
  // across this nested context. NOTE(review): exact save/restore protocol is
  // in the Push/PopExpressionEvaluationContext implementations — not visible
  // here.
  MaybeODRUseExprSet SavedMaybeODRUseExprs;

  /// The lambdas that are present within this context, if it
  /// is indeed an unevaluated context.
  SmallVector<LambdaExpr *, 2> Lambdas;

  /// The declaration that provides context for lambda expressions
  /// and block literals if the normal declaration context does not
  /// suffice, e.g., in a default function argument.
  Decl *ManglingContextDecl;

  /// The context information used to mangle lambda expressions
  /// and block literals within this context.
  ///
  /// This mangling information is allocated lazily, since most contexts
  /// do not have lambda expressions or block literals.
  std::unique_ptr<MangleNumberingContext> MangleNumbering;

  /// If we are processing a decltype type, a set of call expressions
  /// for which we have deferred checking the completeness of the return type.
  SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

  /// If we are processing a decltype type, a set of temporary binding
  /// expressions for which we have deferred checking the destructor.
  SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

  // Expressions that may dereference a 'noderef' pointer; checked when the
  // context is popped (see WarnOnPendingNoDerefs below).
  llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

  /// \brief Describes whether we are in an expression context which we have
  /// to handle differently.
  enum ExpressionKind {
    EK_Decltype, EK_TemplateArgument, EK_Other
  } ExprContext;

  ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                    unsigned NumCleanupObjects,
                                    CleanupInfo ParentCleanup,
                                    Decl *ManglingContextDecl,
                                    ExpressionKind ExprContext)
      : Context(Context), ParentCleanup(ParentCleanup),
        NumCleanupObjects(NumCleanupObjects), NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
        ExprContext(ExprContext) {}

  /// Retrieve the mangling numbering context, used to consistently
  /// number constructs like lambdas for mangling.
  MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

  bool isUnevaluated() const {
    return Context == ExpressionEvaluationContext::Unevaluated ||
           Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
           Context == ExpressionEvaluationContext::UnevaluatedList;
  }
  bool isConstantEvaluated() const {
    return Context == ExpressionEvaluationContext::ConstantEvaluated;
  }
};

/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
    const DeclContext *DC, Decl *&ManglingContextDecl);

/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // Method pointer plus a 2-bit Kind packed into the low pointer bits.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult() : Pair() {}
  // A deleted method is recorded as NoMemberOrDeleted rather than Success.
  SpecialMemberOverloadResult(CXXMethodDecl *MD)
      : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

// Folding-set entry wrapping a SpecialMemberOverloadResult so results can be
// cached in SpecialMemberCache below.
class SpecialMemberOverloadResultEntry
    : public llvm::FastFoldingSetNode,
      public SpecialMemberOverloadResult {
public:
  SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID) {}
};

/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

// Arena allocator for Sema-owned objects. NOTE(review): allocation lifetime
// is the lifetime of this Sema instance — confirm against users of BumpAlloc.
llvm::BumpPtrAllocator BumpAlloc;

/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

// Pair of instance/factory method lists, keyed by selector, for "id" message
// typechecking (see MethodPool below).
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

// Record plus the special-member kind packed into the low 3 pointer bits.
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
  ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
};

void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Simple accessors for the compiler-invocation objects Sema holds.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

void emitAndClearUnusedLocalTypedefWarnings();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing.  Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K);

void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     const BlockExpr *blkExpr = nullptr);

// Returns the innermost function scope, or null when no function scope is
// active.
sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, llvm::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, llvm::index_sequence_for<Ts...>()); DB << T; } }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. 
For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return !D->isHidden() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. 
bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. 
  bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                              const NamedDecl *B);
  void diagnoseEquivalentInternalLinkageDeclarations(
      SourceLocation Loc, const NamedDecl *D,
      ArrayRef<const NamedDecl *> Equiv);

  bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

  // Completeness query without diagnostics: complete iff the Impl worker
  // reports no failure.
  bool isCompleteType(SourceLocation Loc, QualType T) {
    return !RequireCompleteTypeImpl(Loc, T, nullptr);
  }
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);

  // Variadic convenience wrapper: binds extra diagnostic arguments and
  // forwards to the TypeDiagnoser overload above.
  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  void completeExprArrayBound(Expr *E);
  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T,
                             TagDecl *OwnedTagDecl = nullptr);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  /// Result of a "can we skip this body?" query; carries the prior
  /// declaration to compare against when CheckSameAsPrevious is set.
  struct SkipBodyInfo {
    SkipBodyInfo()
        : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
          New(nullptr) {}
    bool ShouldSkip;
    bool CheckSameAsPrevious;
    NamedDecl *Previous;
    NamedDecl *New;
  };

  DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

  void DiagnoseUseOfUnimplementedSelectors();

  bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

  ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                         Scope *S, CXXScopeSpec *SS = nullptr,
                         bool isClassName = false, bool HasTrailingDot = false,
                         ParsedType ObjectType = nullptr,
                         bool IsCtorOrDtorName = false,
                         bool WantNontrivialTypeSourceInfo = false,
                         bool IsClassTemplateDeductionContext = true,
                         IdentifierInfo **CorrectedII = nullptr);
  TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
  bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
  void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                               Scope *S, CXXScopeSpec *SS,
                               ParsedType &SuggestedType,
                               bool IsTemplateName = false);

  /// Attempt to behave like MSVC in situations where lookup of an unqualified
  /// type name has failed in a dependent context. In these situations, we
  /// automatically form a DependentTypeName that will retry lookup in a
  /// related scope during instantiation.
  ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                      SourceLocation NameLoc,
                                      bool IsTemplateTypeArg);

  /// Describes the result of the name lookup and resolution performed
  /// by \c ClassifyName().
  enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate,
    NC_UndeclaredTemplate,
  };

  /// Tagged union describing what a name resolved to; only the member
  /// matching Kind is meaningful, as enforced by asserts in the accessors.
  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification UndeclaredTemplate(TemplateName Name) {
      NameClassification Result(NC_UndeclaredTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
      return Template;
    }

    // Maps the internal NC_* template kinds onto the parser-facing
    // TemplateNameKind enumeration.
    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      case NC_UndeclaredTemplate:
        return TNK_Undeclared_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };

  /// Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
                                  IdentifierInfo *&Name,
                                  SourceLocation NameLoc,
                                  const Token &NextToken,
                                  bool IsAddressOfOperand,
                                  CorrectionCandidateCallback *CCC = nullptr);

  /// Describes the detailed kind of a template name. Used in diagnostics.
  enum class TemplateNameKindForDiagnostics {
    ClassTemplate,
    FunctionTemplate,
    VarTemplate,
    AliasTemplate,
    TemplateTemplateParam,
    DependentTemplate
  };
  TemplateNameKindForDiagnostics
  getTemplateNameKindForDiagnostics(TemplateName Name);

  /// Determine whether it's plausible that E was intended to be a
  /// template-name.
  // Heuristic used when the parser sees `expr <`: a (member/decl) reference
  // without explicit template arguments plausibly names a template.
  // \p Dependent is set when the answer depends on template instantiation.
  bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
    if (!getLangOpts().CPlusPlus || E.isInvalid())
      return false;
    Dependent = false;
    if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
      return !DRE->hasExplicitTemplateArgs();
    if (auto *ME = dyn_cast<MemberExpr>(E.get()))
      return !ME->hasExplicitTemplateArgs();
    Dependent = true;
    if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
      return !DSDRE->hasExplicitTemplateArgs();
    if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
      return !DSME->hasExplicitTemplateArgs();
    // Any additional cases recognized here should also be handled by
    // diagnoseExprIntendedAsTemplateName.
    return false;
  }
  void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                          SourceLocation Less,
                                          SourceLocation Greater);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name, SourceLocation Loc,
                                    bool IsTemplateId);
  void diagnoseIgnoredQualifiers(
      unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc,
      SourceLocation ConstQualLoc = SourceLocation(),
      SourceLocation VolatileQualLoc = SourceLocation(),
      SourceLocation RestrictQualLoc = SourceLocation(),
      SourceLocation AtomicQualLoc = SourceLocation(),
      SourceLocation UnalignedQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                    const LookupResult &R);
  NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
  void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                   const LookupResult &R);
  void CheckShadow(Scope *S, VarDecl *D);

  /// Warn if 'E', which is an expression that is about to be modified, refers
  /// to a shadowing declaration.
  void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

  void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
  /// Map of current shadowing declarations to shadowed declarations. Warn if
  /// it looks like the user is trying to modify the shadowing declaration.
  llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                  TypedefNameDecl *D, LookupResult &Previous,
                                  bool &Redeclaration);
  NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope,
                                     ArrayRef<BindingDecl *> Bindings = None);
  NamedDecl *
  ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                               MultiTemplateParamsArg TemplateParamLists);
  // Returns true if the variable declaration is a redeclaration
  bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
  void CheckVariableDeclarationType(VarDecl *NewVD);
  bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                     Expr *Init);
  void CheckCompleteVariableDeclaration(VarDecl *VD);
  void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
  bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
  bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                          SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                                LookupResult &Previous,
                                bool IsMemberSpecialization);
  bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
  bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                      QualType NewT, QualType OldT);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                   bool IsDefinition);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                          SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SC);
  void ActOnParamDefaultArgument(Decl *param,
                                 SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param,
                                         SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
  void ActOnUninitializedDecl(Decl *dcl);
  void ActOnInitializerError(Decl *Dcl);

  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void CheckStaticLocalForDllExport(VarDecl *VD);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
      SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                                MultiTemplateParamsArg TemplateParamLists,
                                SkipBodyInfo *SkipBody = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                                SkipBodyInfo *SkipBody = nullptr);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }

  /// Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
/// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. 
/// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parsed has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. 
  /// Note, the values of these enumerators correspond to %select values in
  /// diagnostics.
  enum class MissingImportKind {
    Declaration,
    Definition,
    DefaultArgument,
    ExplicitSpecialization,
    PartialSpecialization
  };

  /// Diagnose that the specified declaration needs to be visible but
  /// isn't, and suggest a module import that would resolve the problem.
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             MissingImportKind MIK, bool Recover = true);
  void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                             SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                             MissingImportKind MIK, bool Recover);

  Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                             SourceLocation LBraceLoc);
  Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                              SourceLocation RBraceLoc);

  /// We've found a use of a templated declaration that would trigger an
  /// implicit instantiation. Check that any relevant explicit specializations
  /// and partial specializations are visible, and diagnose if not.
  void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

  /// We've found a use of a template specialization that would select a
  /// partial specialization. Check that the partial specialization is visible,
  /// and diagnose if not.
  void checkPartialSpecializationVisibility(SourceLocation Loc,
                                            NamedDecl *Spec);

  /// Retrieve a suitable printing policy for diagnostics.
  PrintingPolicy getPrintingPolicy() const {
    return getPrintingPolicy(Context, PP);
  }

  /// Retrieve a suitable printing policy for diagnostics.
  static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                          const Preprocessor &PP);

  /// Scope actions.
  void ActOnPopScope(SourceLocation Loc, Scope *S);
  void ActOnTranslationUnitScope(Scope *S);

  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   RecordDecl *&AnonRecord);
  Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                   MultiTemplateParamsArg TemplateParams,
                                   bool IsExplicitInstantiation,
                                   RecordDecl *&AnonRecord);

  Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                    RecordDecl *Record,
                                    const PrintingPolicy &Policy);

  Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                       RecordDecl *Record);

  /// Common ways to introduce type names without a tag for use in diagnostics.
  /// Keep in sync with err_tag_reference_non_tag.
  enum NonTagKind {
    NTK_NonStruct,
    NTK_NonClass,
    NTK_NonUnion,
    NTK_NonEnum,
    NTK_Typedef,
    NTK_TypeAlias,
    NTK_Template,
    NTK_TypeAliasTemplate,
    NTK_TemplateTemplateArgument,
  };

  /// Given a non-tag type declaration, returns an enum useful for indicating
  /// what kind of non-tag type this is.
  NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

  bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                    TagTypeKind NewTag, bool isDefinition,
                                    SourceLocation NewTagLoc,
                                    const IdentifierInfo *Name);

  enum TagUseKind {
    TUK_Reference,   // Reference to a tag:  'struct foo *X;'
    TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
    TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
    TUK_Friend       // Friend declaration:  'friend struct foo;'
  };

  Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                 SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
                 SourceLocation NameLoc, const ParsedAttributesView &Attr,
                 AccessSpecifier AS, SourceLocation ModulePrivateLoc,
                 MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
                 bool &IsDependent, SourceLocation ScopedEnumKWLoc,
                 bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
                 bool IsTypeSpecifier, bool IsTemplateParamOrArg,
                 SkipBodyInfo *SkipBody = nullptr);

  Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                                unsigned TagSpec, SourceLocation TagLoc,
                                CXXScopeSpec &SS, IdentifierInfo *Name,
                                SourceLocation NameLoc,
                                const ParsedAttributesView &Attr,
                                MultiTemplateParamsArg TempParamLists);

  TypeResult ActOnDependentTag(Scope *S,
                               unsigned TagSpec,
                               TagUseKind TUK,
                               const CXXScopeSpec &SS,
                               IdentifierInfo *Name,
                               SourceLocation TagLoc,
                               SourceLocation NameLoc);

  void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 IdentifierInfo *ClassName,
                 SmallVectorImpl<Decl *> &Decls);
  Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                   Declarator &D, Expr *BitfieldWidth);

  FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                         Declarator &D, Expr *BitfieldWidth,
                         InClassInitStyle InitStyle,
                         AccessSpecifier AS);
  MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                   SourceLocation DeclStart, Declarator &D,
                                   Expr *BitfieldWidth,
                                   InClassInitStyle InitStyle,
                                   AccessSpecifier AS,
                                   const ParsedAttr &MSPropertyAttr);

  FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                            TypeSourceInfo *TInfo,
                            RecordDecl *Record, SourceLocation Loc,
                            bool Mutable, Expr *BitfieldWidth,
                            InClassInitStyle InitStyle,
                            SourceLocation TSSL,
                            AccessSpecifier AS, NamedDecl *PrevDecl,
                            Declarator *D = nullptr);

  bool CheckNontrivialField(FieldDecl *FD);
  void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

  enum TrivialABIHandling {
    /// The triviality of a method unaffected by "trivial_abi".
    TAH_IgnoreTrivialABI,

    /// The triviality of a method affected by "trivial_abi".
    TAH_ConsiderTrivialABI
  };

  bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                              TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                              bool Diagnose = false);
  CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
  void ActOnLastBitfield(SourceLocation DeclStart,
                         SmallVectorImpl<Decl *> &AllIvarDecls);
  Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                  Declarator &D, Expr *BitfieldWidth,
                  tok::ObjCKeywordKind visibility);

  // This is used for both record definitions and ObjC interface declarations.
  void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                   ArrayRef<Decl *> Fields, SourceLocation LBrac,
                   SourceLocation RBrac, const ParsedAttributesView &AttrList);

  /// ActOnTagStartDefinition - Invoked when we have entered the
  /// scope of a tag's definition (e.g., for an enumeration, class,
  /// struct, or union).
  void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

  /// Perform ODR-like check for C/ObjC when merging tag types from modules.
  /// Differently from C++, actually parse the body and reject / error out
  /// in case of a structural mismatch.
  bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                                SkipBodyInfo &SkipBody);

  typedef void *SkippedDefinitionContext;

  /// Invoked when we enter a tag definition that we're skipping.
  SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

  Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

  /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
  /// C++ record definition's base-specifiers clause and are starting its
  /// member declarations.
  void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                       SourceLocation FinalLoc,
                                       bool IsFinalSpelledSealed,
                                       SourceLocation LBraceLoc);

  /// ActOnTagFinishDefinition - Invoked once we have finished parsing
  /// the definition of a tag (enumeration, class, struct, or union).
  void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                                SourceRange BraceRange);

  void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

  void ActOnObjCContainerFinishDefinition();

  /// Invoked when we must temporarily exit the objective-c container
  /// scope for parsing/looking-up C constructs.
  ///
  /// Must be followed by a call to \see ActOnObjCReenterContainerContext
  void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
  void ActOnObjCReenterContainerContext(DeclContext *DC);

  /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
  /// error parsing the definition of a tag.
  void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

  EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                      EnumConstantDecl *LastEnumConst,
                                      SourceLocation IdLoc,
                                      IdentifierInfo *Id,
                                      Expr *val);
  bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
  bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                              QualType EnumUnderlyingTy, bool IsFixed,
                              const EnumDecl *Prev);

  /// Determine whether the body of an anonymous enumeration should be skipped.
  /// \param II The name of the first enumerator.
  SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                      SourceLocation IILoc);

  Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                          SourceLocation IdLoc, IdentifierInfo *Id,
                          const ParsedAttributesView &Attrs,
                          SourceLocation EqualLoc, Expr *Val);
  void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                     Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                     const ParsedAttributesView &Attr);

  DeclContext *getContainingDC(DeclContext *DC);

  /// Set the current declaration context until it gets popped.
  void PushDeclContext(Scope *S, DeclContext *DC);
  void PopDeclContext();

  /// EnterDeclaratorContext - Used when we must lookup names in the context
  /// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). 
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,

  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,

  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,

  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,

  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,

  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr( NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex, StringRef Uuid); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, IdentifierInfo *Ident, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); 
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true); ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, bool AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType 
ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
class ContextualImplicitConverter {
public:
  /// If true, this converter performs its work without emitting
  /// diagnostics (semantics are defined by the code driving the
  /// conversion — see PerformContextualImplicitConversion).
  bool Suppress;

  /// If true, the conversion itself (as opposed to the failure
  /// diagnostics) is suppressed — NOTE(review): inferred from the field
  /// name and constructor; confirm against the conversion driver.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Contextual converter that accepts an integral or (possibly scoped)
/// enumeration type as the destination (see match() below); used where an
/// integral-constant-expression-like value is required.
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" diagnostic to the more specific
  // diagnoseNotInt hook below.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false); void AddTemplateOverloadCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions 
= false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL); bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 
'note' the specific overload candidate void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn, QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. 
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfOnlyViableOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfOnlyViableOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. 
enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. 
a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an external /// source. bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, 
ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. 
/// As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; });

/// Convenience overload: correct typos in \p E with the default (accept-all)
/// filter replaced by \p Filter, and no initializer declaration to avoid.
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}

/// ExprResult-taking overload: invalid results are passed through untouched.
// NOTE(review): InitDecl is accepted here but not forwarded — the call below
// dispatches to the (Expr *, Filter) overload, so InitDecl is effectively
// ignored for this overload. Confirm whether it should be passed through.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
                          llvm::function_ref<ExprResult(Expr *)> Filter =
                              [](Expr *E) -> ExprResult { return E; }) {
  return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}

/// ExprResult-taking overload with an explicit filter.
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
                          llvm::function_ref<ExprResult(Expr *)> Filter) {
  return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  bool ErrorRecovery = true);

void diagnoseTypo(const TypoCorrection &Correction,
                  const PartialDiagnostic &TypoDiag,
                  const PartialDiagnostic &PrevNote,
                  bool ErrorRecovery = true);

void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);

void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
                                        ArrayRef<Expr *> Args,
                                        AssociatedNamespaceSet &AssociatedNamespaces,
                                        AssociatedClassSet &AssociatedClasses);

void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
                          bool ConsiderLinkage, bool AllowInlineNamespace);

bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);

void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}

ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
                                        SourceLocation IdLoc,
                                        bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
                               Scope *S, bool ForRedeclaration,
                               SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
                                    Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);

// More parsing and symbol
table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. 
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. 
ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCInterfaceDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// Add the given method to the list of globally-known methods. 
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
// Failures are remembered (keyed by identifier and location) so the same
// typo is not repeatedly re-corrected; recording can be suppressed via
// RecordFailure.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool.
/// This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// A thin wrapper around an Expr* representing a full-expression; only
/// Sema::MakeFullExpr can create a non-null instance (via the private
/// constructor).
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

/// Wrap \p Arg as a full-expression, using the expression's own location
/// (or an invalid location when \p Arg is null).
FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
/// Same as MakeFullExpr, but the expression's value is discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  // Cancel the pop, e.g. when ownership of the scope is handed elsewhere.
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
                         SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       Stmt *InitStmt,
                       ConditionResult Cond, Stmt *ThenVal,
                       SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  Stmt *InitStmt,
                                  ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult
ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables 
= 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, 
Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. 
void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin delaying diagnostics for a declaration being parsed; diagnostics
/// accumulate in \p pool until the matching PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Suspend diagnostic delaying while parsing a class body; restored by the
/// matching PopParsingClass.
ParsingClassState PushParsingClass() {
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. 
/// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Conditionally issue a diagnostic based on the current /// evaluation context. 
/// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult 
BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); 
ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope 
*S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLoc, Expr *Length, SourceLocation RBLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType 
BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" // Handle the final expression in a statement expression. 
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: /// Lookup the specified comparison category types in the standard /// library, an check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. 
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. 
CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? 
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. 
ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. 
ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). 
The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. 
void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. 
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. 
ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. 
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. 
AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. 
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. 
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
/// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. 
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, bool IsConstexprSpecified); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. 
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, ArrayRef<Expr *> Strings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. 
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS); bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, const 
ParsedAttributesView &Attrs); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. 
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

void CheckCompletedCXXClass(CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);

// Delayed / late parsing of member declarations (e.g. bodies and default
// arguments parsed after the enclosing class is complete).
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

// static_assert handling: ActOn* is the parser entry point,
// Build* performs the semantic construction.
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc, bool Failed);

// Friend declarations.
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

// Declarator checks for special member functions; each may adjust the
// declared type \p R and storage class \p SC.
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();

//===--------------------------------------------------------------------===//
// C++ Derived Classes
//

/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                     SourceRange SpecifierRange,
                                     bool Virtual, AccessSpecifier Access,
                                     TypeSourceInfo *TInfo,
                                     SourceLocation EllipsisLoc);

BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
                              ParsedAttributes &Attrs, bool Virtual,
                              AccessSpecifier Access, ParsedType basetype,
                              SourceLocation BaseLoc,
                              SourceLocation EllipsisLoc);

bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                          MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
                         MutableArrayRef<CXXBaseSpecifier *> Bases);

bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
                   CXXBasePaths &Paths);

// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// Result of a C++ access-control check (returned by the Check*Access
/// entry points below).
enum AccessResult {
  AR_accessible,   ///< The entity is accessible.
  AR_inaccessible, ///< The entity is not accessible.
  AR_dependent,    ///< NOTE(review): presumably the answer depends on
                   ///< template parameters - confirm at the call sites.
  AR_delayed       ///< NOTE(review): presumably the check was deferred
                   ///< (see HandleDelayedAccessCheck) - confirm.
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

// Access-check entry points for the various ways a member can be named.
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                   CXXRecordDecl *DecomposedClass,
                                   DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr, Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path, unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

// Re-run access checks that were recorded as dependent, once template
// arguments are available.
void HandleDependentAccessCheck(
    const DependentDiagnostic &DD,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(
    const DeclContext *Pattern,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// NOTE(review): enumerators appear to select the syntactic context in
/// which an abstract type was used (return type, parameter, field, ...);
/// confirm against the corresponding diagnostic definition.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);

/// Convenience overload: wraps \p DiagID and \p Args in a
/// BoundTypeDiagnoser and forwards to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//

void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);

/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, SourceLocation TemplateKWLoc = SourceLocation(), AssumedTemplateKind *ATK = nullptr); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. 
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
enum TemplateParamListContext {
  TPC_ClassTemplate,
  TPC_VarTemplate,
  TPC_FunctionTemplate,
  TPC_ClassTemplateMember,
  TPC_FriendClassTemplate,
  TPC_FriendFunctionTemplate,
  TPC_FriendFunctionTemplateDefinition,
  TPC_TypeAliasTemplate
};

/// Check a new template parameter list against a previous declaration's
/// list (if any) in the given context.
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
                                TemplateParameterList *OldParams,
                                TemplateParamListContext TPC,
                                SkipBodyInfo *SkipBody = nullptr);

TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
    SourceLocation DeclStartLoc, SourceLocation DeclLoc,
    const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
    ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend,
    bool &IsMemberSpecialization, bool &Invalid);

DeclResult CheckClassTemplate(
    Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
    CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
    const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
    AccessSpecifier AS, SourceLocation ModulePrivateLoc,
    SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
    TemplateParameterList **OuterTemplateParamLists,
    SkipBodyInfo *SkipBody = nullptr);

TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
                                                  QualType NTTPType,
                                                  SourceLocation Loc);

/// Translate parsed template arguments into the semantic form used by
/// the template-argument checking routines.
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
                                TemplateArgumentListInfo &Out);

ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);

void NoteAllFoundTemplates(TemplateName Name);

QualType CheckTemplateIdType(TemplateName Template,
                             SourceLocation TemplateLoc,
                             TemplateArgumentListInfo &TemplateArgs);

TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
                    TemplateTy Template, IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
                    bool IsCtorOrDtorName = false, bool IsClassName = false);

/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( 
ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to /// contain the converted forms of the template arguments as written. /// Otherwise, \p TemplateArgs will not be modified. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. UPPC_Expression = 0, /// The base type of a class type. UPPC_BaseType, /// The type of an arbitrary declaration. UPPC_DeclarationType, /// The type of a data member. UPPC_DataMemberType, /// The size of a bit-field. UPPC_BitFieldWidth, /// The expression in a static assertion. 
UPPC_StaticAssertExpression, /// The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// The enumerator value. UPPC_EnumeratorValue, /// A using declaration. UPPC_UsingDeclaration, /// A friend declaration. UPPC_FriendDeclaration, /// A declaration qualifier. UPPC_DeclarationQualifier, /// An initializer. UPPC_Initializer, /// A default argument. UPPC_DefaultArgument, /// The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// The type of an exception. UPPC_ExceptionType, /// Partial specialization. UPPC_PartialSpecialization, /// Microsoft __if_exists. UPPC_IfExists, /// Microsoft __if_not_exists. UPPC_IfNotExists, /// Lambda expression. UPPC_Lambda, /// Block expression, UPPC_Block }; /// Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. 
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// After substituting deduced template arguments, a dependent /// parameter type did not match the corresponding argument. TDK_DeducedMismatch, /// After substituting deduced template arguments, an element of /// a dependent parameter type did not match the corresponding element /// of the corresponding argument (when deducing from an initializer list). 
TDK_DeducedMismatchNested, /// A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// Checking non-dependent argument conversions failed. TDK_NonDependentConversionFailure, /// Deduction failed; that's all we know. TDK_MiscellaneousDeductionFailure, /// CUDA Target attributes do not match. TDK_CUDATargetMismatch }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call. 
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo 
*TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); 
ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). 
TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or /// a TemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are computing the exception specification for a defaulted special /// member function. ExceptionSpecEvaluation, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation, /// We are declaring an implicit special member function. DeclaringSpecialMember, /// We are defining a synthesized function (such as a defaulted special /// member). DefiningSynthesizedFunction, /// Added for Template instantiation observation. 
/// Memoization means we are _not_ instantiating a template because /// it is already instantiated (but we entered a context where we /// would have had to if it was not already instantiated). Memoization } Kind; /// Was the enclosing context a non-instantiation SFINAE context? bool SavedInNonInstantiationSFINAEContext; /// The point of instantiation or synthesis within the source code. SourceLocation PointOfInstantiation; /// The entity that is being synthesized. Decl *Entity; /// The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; // FIXME: Wrap this union around more members, or perhaps store the // kind-specific members in the RAII object owning the context. union { /// The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// The special member being declared or defined. CXXSpecialMember SpecialMember; }; ArrayRef<TemplateArgument> template_arguments() const { assert(Kind != DeclaringSpecialMember); return {TemplateArgs, NumTemplateArgs}; } /// The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation. SourceRange InstantiationRange; CodeSynthesisContext() : Kind(TemplateInstantiation), SavedInNonInstantiationSFINAEContext(false), Entity(nullptr), Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. 
bool isInstantiationRecord() const; }; /// List of active code synthesis contexts. /// /// This vector is treated as a stack. As synthesis of one entity requires /// synthesis of another, additional contexts are pushed onto the stack. SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts; /// Specializations whose definitions are currently being instantiated. llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations; /// Non-dependent types used in templates that have already been instantiated /// by some template instantiation. llvm::DenseSet<QualType> InstantiatedNonDependentTypes; /// Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> CodeSynthesisContextLookupModules; /// Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. llvm::DenseSet<Module*> LookupModulesCache; /// Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instantiating a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// Map from the most recent declaration of a namespace to the most /// recent visible declaration of that namespace. llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache; /// Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// The number of \p CodeSynthesisContexts that are not template /// instantiations and, therefore, should not be counted as part of the /// instantiation depth. /// /// When the instantiation depth reaches the user-configurable limit /// \p LangOptions::InstantiationDepth we will abort instantiation. 
// FIXME: Should we have a similar limit for other forms of synthesis? unsigned NonInstantiationEntries; /// The depth of the context stack at the point when the most recent /// error or warning was produced. /// /// This value is used to suppress printing of redundant context stacks /// when there are multiple errors or warnings in the same instantiation. // FIXME: Does this belong in Sema? It's tough to implement it anywhere else. unsigned LastEmittedCodeSynthesisContextDepth = 0; /// The template instantiation callbacks to trace or track /// instantiations (objects can be chained). /// /// This callbacks is used to print, trace or track template /// instantiations as they are being constructed. std::vector<std::unique_ptr<TemplateInstantiationCallback>> TemplateInstCallbacks; /// The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. class ArgumentPackSubstitutionIndexRAII { Sema &Self; int OldSubstitutionIndex; public: ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex) : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) { Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex; } ~ArgumentPackSubstitutionIndexRAII() { Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex; } }; friend class ArgumentPackSubstitutionRAII; /// For each declaration that involved template argument deduction, the /// set of diagnostics that were suppressed during that template argument /// deduction. /// /// FIXME: Serialize this structure to the AST file. 
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> > SuppressedDiagnosticsMap; SuppressedDiagnosticsMap SuppressedDiagnostics; /// A stack object to be created when performing template /// instantiation. /// /// Construction of an object of type \c InstantiatingTemplate /// pushes the current instantiation onto the stack of active /// instantiations. If the size of this stack exceeds the maximum /// number of recursive template instantiations, construction /// produces an error and evaluates true. /// /// Destruction of this object will pop the named instantiation off /// the stack. struct InstantiatingTemplate { /// Note that we are instantiating a class template, /// function template, variable template, alias template, /// or a member thereof. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, Decl *Entity, SourceRange InstantiationRange = SourceRange()); struct ExceptionSpecification {}; /// Note that we are instantiating an exception specification /// of a function template. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionDecl *Entity, ExceptionSpecification, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument in a /// template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateParameter Param, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting either explicitly-specified or /// deduced template arguments during function template argument deduction. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. 
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we have finished instantiating this template. void Clear(); ~InstantiatingTemplate() { Clear(); } /// Determines whether we have exceeded the maximum /// recursive template instantiations. bool isInvalid() const { return Invalid; } /// Determine whether we are already instantiating this /// specialization in some surrounding active instantiation. 
bool isAlreadyInstantiating() const { return AlreadyInstantiating; } private: Sema &SemaRef; bool Invalid; bool AlreadyInstantiating; bool CheckInstantiationDepth(SourceLocation PointOfInstantiation, SourceRange InstantiationRange); InstantiatingTemplate( Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind, SourceLocation PointOfInstantiation, SourceRange InstantiationRange, Decl *Entity, NamedDecl *Template = nullptr, ArrayRef<TemplateArgument> TemplateArgs = None, sema::TemplateDeductionInfo *DeductionInfo = nullptr); InstantiatingTemplate(const InstantiatingTemplate&) = delete; InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete; }; void pushCodeSynthesisContext(CodeSynthesisContext Ctx); void popCodeSynthesisContext(); /// Determine whether we are currently performing template instantiation. bool inTemplateInstantiation() const { return CodeSynthesisContexts.size() > NonInstantiationEntries; } void PrintContextStack() { if (!CodeSynthesisContexts.empty() && CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) { PrintInstantiationStack(); LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size(); } if (PragmaAttributeCurrentTargetDecl) PrintPragmaAttributeInstantiationPoint(); } void PrintInstantiationStack(); void PrintPragmaAttributeInstantiationPoint(); /// Determines whether we are currently in a context where /// template argument substitution failures are not considered /// errors. /// /// \returns An empty \c Optional if we're not in a SFINAE context. /// Otherwise, contains a pointer that, if non-NULL, contains the nearest /// template-deduction context object, which can be used to capture /// diagnostics that will be suppressed. Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const; /// Determines whether we are currently in a context that /// is not evaluated as per C++ [expr] p5. 
  /// Determine whether the current expression evaluation context is
  /// unevaluated (C++ [expr]p5). Asserts that at least one expression
  /// evaluation context exists.
  bool isUnevaluatedContext() const {
    assert(!ExprEvalContexts.empty() &&
           "Must be in an expression evaluation context");
    return ExprEvalContexts.back().isUnevaluated();
  }

  /// RAII class used to determine whether SFINAE has
  /// trapped any errors that occur during template argument
  /// deduction.
  class SFINAETrap {
    Sema &SemaRef;
    // Snapshot of Sema's SFINAE-related state, restored verbatim by the
    // destructor.
    unsigned PrevSFINAEErrors;
    bool PrevInNonInstantiationSFINAEContext;
    bool PrevAccessCheckingSFINAE;
    bool PrevLastDiagnosticIgnored;
  public:
    /// Capture the SFINAE state of \p SemaRef. If we are not already in a
    /// SFINAE context, mark this as a non-instantiation SFINAE context for
    /// the lifetime of the trap.
    explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
                                      SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored())
    {
      if (!SemaRef.isSFINAEContext())
        SemaRef.InNonInstantiationSFINAEContext = true;
      SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
    }

    /// Restore every piece of state captured by the constructor, discarding
    /// any SFINAE errors that occurred inside the trap.
    ~SFINAETrap() {
      SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
      SemaRef.InNonInstantiationSFINAEContext
        = PrevInNonInstantiationSFINAEContext;
      SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
      SemaRef.getDiagnostics().setLastDiagnosticIgnored(
          PrevLastDiagnosticIgnored);
    }

    /// Determine whether any SFINAE errors have been trapped.
    bool hasErrorOccurred() const {
      return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
    }
  };

  /// RAII class used to indicate that we are performing provisional
  /// semantic analysis to determine the validity of a construct, so
  /// typo-correction and diagnostics in the immediate context (not within
  /// implicitly-instantiated templates) should be suppressed.
  class TentativeAnalysisScope {
    Sema &SemaRef;
    // FIXME: Using a SFINAETrap for this is a hack.
    SFINAETrap Trap;
    bool PrevDisableTypoCorrection;
  public:
    /// Disable typo correction for the lifetime of this scope; the trap
    /// member (constructed with AccessCheckingSFINAE = true) additionally
    /// suppresses diagnostics through the SFINAE machinery.
    explicit TentativeAnalysisScope(Sema &SemaRef)
        : SemaRef(SemaRef), Trap(SemaRef, true),
          PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
      SemaRef.DisableTypoCorrection = true;
    }
    ~TentativeAnalysisScope() {
      SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
    }
  };

  /// The current instantiation scope used to store local
  /// variables.
  LocalInstantiationScope *CurrentInstantiationScope;

  /// Tracks whether we are in a context where typo correction is
  /// disabled.
  bool DisableTypoCorrection;

  /// The number of typos corrected by CorrectTypo.
  unsigned TyposCorrected;

  typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
  typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

  /// A cache containing identifiers for which typo correction failed and
  /// their locations, so that repeated attempts to correct an identifier in a
  /// given location are ignored if typo correction already failed for it.
  IdentifierSourceLocations TypoCorrectionFailures;

  /// Worker object for performing CFG-based warnings.
  sema::AnalysisBasedWarnings AnalysisWarnings;
  threadSafety::BeforeSet *ThreadSafetyDeclCache;

  /// An entity for which implicit template instantiation is required.
  ///
  /// The source location associated with the declaration is the first place in
  /// the source code where the declaration was "used". It is not necessarily
  /// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation),
  /// However, it is the location that diagnostics should generally refer to,
  /// because users will need to know what code triggered the instantiation.
  typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

  /// The queue of implicit template instantiations that are required
  /// but have not yet been performed.
  std::deque<PendingImplicitInstantiation> PendingInstantiations;

  /// Queue of implicit template instantiations that cannot be performed
  /// eagerly.
  SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

  /// RAII scope that, when enabled, takes ownership of the global queues of
  /// pending instantiations and vtable uses. Instantiations triggered inside
  /// the scope can be performed via perform(); the destructor then restores
  /// the saved queues (asserting the in-scope work was fully drained).
  class GlobalEagerInstantiationScope {
  public:
    GlobalEagerInstantiationScope(Sema &S, bool Enabled)
        : S(S), Enabled(Enabled) {
      if (!Enabled) return;

      // Steal the currently-pending work; it is given back on destruction.
      SavedPendingInstantiations.swap(S.PendingInstantiations);
      SavedVTableUses.swap(S.VTableUses);
    }

    /// Define all used vtables and perform all pending instantiations queued
    /// since this scope was entered. No-op when the scope is disabled.
    void perform() {
      if (Enabled) {
        S.DefineUsedVTables();
        S.PerformPendingInstantiations();
      }
    }

    ~GlobalEagerInstantiationScope() {
      if (!Enabled) return;

      // Restore the set of pending vtables.
      assert(S.VTableUses.empty() &&
             "VTableUses should be empty before it is discarded.");
      S.VTableUses.swap(SavedVTableUses);

      // Restore the set of pending implicit instantiations.
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    }

  private:
    Sema &S;
    SmallVector<VTableUse, 16> SavedVTableUses;
    std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
    bool Enabled;
  };

  /// The queue of implicit template instantiations that are required
  /// and must be performed within the current local scope.
  ///
  /// This queue is only used for member functions of local classes in
  /// templates, which must be instantiated in the same scope as their
  /// enclosing function, so that they can reference function-local
  /// types, static variables, enumerators, etc.
  std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

  /// RAII scope that saves and restores the queue of pending *local*
  /// implicit instantiations; perform() runs only the local ones.
  class LocalEagerInstantiationScope {
  public:
    LocalEagerInstantiationScope(Sema &S) : S(S) {
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

    void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

    ~LocalEagerInstantiationScope() {
      // All local work queued inside this scope must have been performed
      // before the saved queue is swapped back in.
      assert(S.PendingLocalImplicitInstantiations.empty() &&
             "there shouldn't be any pending local implicit instantiations");
      SavedPendingLocalImplicitInstantiations.swap(
          S.PendingLocalImplicitInstantiations);
    }

  private:
    Sema &S;
    std::deque<PendingImplicitInstantiation>
        SavedPendingLocalImplicitInstantiations;
  };

  /// A helper class for building up ExtParameterInfos.
  class ExtParameterInfoBuilder {
    SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
    // Becomes true once any stored info differs from the
    // default-constructed value.
    bool HasInteresting = false;

  public:
    /// Set the ExtParameterInfo for the parameter at the given index,
    /// filling any skipped indices with default-constructed infos.
    void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
      assert(Infos.size() <= index);
      Infos.resize(index);
      Infos.push_back(info);

      if (!HasInteresting)
        HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
    }

    /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
    /// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. 
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, 
Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl 
*BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). 
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). void ActOnPragmaMSPointersToMembers( LangOptions::PragmaMSPointersToMembersKind Kind, SourceLocation PragmaLoc); /// Called on well formed \#pragma vtordisp(). void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action, SourceLocation PragmaLoc, MSVtorDispAttr::Mode Value); enum PragmaSectionKind { PSK_DataSeg, PSK_BSSSeg, PSK_ConstSeg, PSK_CodeSeg, }; bool UnifySection(StringRef SectionName, int SectionFlags, DeclaratorDecl *TheDecl); bool UnifySection(StringRef SectionName, int SectionFlags, SourceLocation PragmaSectionLocation); /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg. void ActOnPragmaMSSeg(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, StringLiteral *SegmentName, llvm::StringRef PragmaName); /// Called on well formed \#pragma section(). void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// Called on well-formed \#pragma init_seg(). 
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// Called on #pragma clang __debug dump II void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II); /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. 
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. 
void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. 
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                          unsigned SpellingListIndex);

/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
                       unsigned SpellingListIndex);

/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                       unsigned SpellingListIndex);

/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                         Expr *MinBlocks, unsigned SpellingListIndex);

/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
                 unsigned SpellingListIndex, bool InInstantiation = false);

void AddParameterABIAttr(SourceRange AttrRange, Decl *D, ParameterABI ABI,
                         unsigned SpellingListIndex);

enum class RetainOwnershipKind {NS, CF, OS};

void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
                      RetainOwnershipKind K, bool IsTemplateInstantiation);

/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
                                    Expr *Max, unsigned SpellingListIndex);

/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min, Expr *Max, unsigned SpellingListIndex); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); //===--------------------------------------------------------------------===// // OpenCL extensions. // private: std::string CurrOpenCLExtension; /// Extensions required by an OpenCL type. llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap; /// Extensions required by an OpenCL declaration. llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap; public: llvm::StringRef getCurrentOpenCLExtension() const { return CurrOpenCLExtension; } /// Check if a function declaration \p FD associates with any /// extensions present in OpenCLDeclExtMap and if so return the /// extension(s) name(s). 
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD); /// Check if a function type \p FT associates with any /// extensions present in OpenCLTypeExtMap and if so return the /// extension(s) name(s). std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT); /// Find an extension in an appropriate extension map and return its name template<typename T, typename MapT> std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map); void setCurrentOpenCLExtension(llvm::StringRef Ext) { CurrOpenCLExtension = Ext; } /// Set OpenCL extensions for a type which can only be used when these /// OpenCL extensions are enabled. If \p Exts is empty, do nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts); /// Set OpenCL extensions for a declaration which can only be /// used when these OpenCL extensions are enabled. If \p Exts is empty, do /// nothing. /// \param Exts A space separated list of OpenCL extensions. void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts); /// Set current OpenCL extensions for a type which can only be used /// when these OpenCL extensions are enabled. If current OpenCL extension is /// empty, do nothing. void setCurrentOpenCLExtensionForType(QualType T); /// Set current OpenCL extensions for a declaration which /// can only be used when these OpenCL extensions are enabled. If current /// OpenCL extension is empty, do nothing. void setCurrentOpenCLExtensionForDecl(Decl *FD); bool isOpenCLDisabledDecl(Decl *FD); /// Check if type \p T corresponding to declaration specifier \p DS /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. /// \return true if type is disabled. bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T); /// Check if declaration \p D used by expression \p E /// is disabled due to required OpenCL extensions being disabled. If so, /// emit diagnostics. 
/// \return true if type is disabled. bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E); //===--------------------------------------------------------------------===// // OpenMP directives and clauses. // private: void *VarDataSharingAttributesStack; /// Number of nested '#pragma omp declare target' directives. unsigned DeclareTargetNestingLevel = 0; /// Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind, bool StrictlyPositive = true); /// Returns OpenMP nesting level for current directive. unsigned getOpenMPNestingLevel() const; /// Adjusts the function scopes index for the target-based regions. void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex, unsigned Level) const; /// Push new OpenMP function region for non-capturing function. void pushOpenMPFunctionRegion(); /// Pop OpenMP function region for non-capturing function. void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI); /// Check whether we're allowed to call Callee from the current function. void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); /// Check if the expression is allowed to be used in expressions for the /// OpenMP devices. void checkOpenMPDeviceExpr(const Expr *E); /// Checks if a type or a declaration is disabled due to the owning extension /// being disabled, and emits diagnostic messages if it is disabled. /// \param D type or declaration to be checked. /// \param DiagLoc source location for the diagnostic message. /// \param DiagInfo information to be emitted for the diagnostic message. /// \param SrcRange source range of the declaration. /// \param Map maps type or declaration to the extensions. /// \param Selector selects diagnostic message: 0 for type and 1 for /// declaration. /// \return true if the type or declaration is disabled. 
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT> bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo, MapT &Map, unsigned Selector = 0, SourceRange SrcRange = SourceRange()); public: /// Return true if the provided declaration \a VD should be captured by /// reference. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const; /// Check if the specified variable is used in one of the private /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP /// constructs. VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false, unsigned StopAt = 0); ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK, ExprObjectKind OK, SourceLocation Loc); /// If the current region is a loop-based region, mark the start of the loop /// construct. void startOpenMPLoop(); /// Check if the specified variable is used in 'private' clause. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const; /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.) /// for \p FD based on DSA for the provided corresponding captured declaration /// \p D. void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level); /// Check if the specified variable is captured by 'target' directive. /// \param Level Relative level of nested OpenMP construct for that the check /// is performed. bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const; ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// Start analysis of clauses. 
void StartOpenMPClause(OpenMPClauseKind K); /// End analysis of clauses. void EndOpenMPClause(); /// Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); /// Check if the current region is an OpenMP loop region and if it is, /// mark loop control variable, used in \p Init for loop initialization, as /// private by default. /// \param Init First part of the for loop. void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init); // OpenMP directives and clauses. /// Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id, OpenMPDirectiveKind Kind); /// Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// Builds a new OpenMPThreadPrivateDecl and checks its correctness. OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc, ArrayRef<Expr *> VarList); /// Called on well-formed '#pragma omp allocate'. DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc, ArrayRef<Expr *> VarList, ArrayRef<OMPClause *> Clauses, DeclContext *Owner = nullptr); /// Called on well-formed '#pragma omp requires'. DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc, ArrayRef<OMPClause *> ClauseList); /// Check restrictions on Requires directive OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc, ArrayRef<OMPClause *> Clauses); /// Check if the specified type is allowed to be used in 'omp declare /// reduction' construct. QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare reduction'. 
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Initialize declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner); /// Initialize declare reduction construct initializer. /// \return omp_priv variable. VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D); /// Finish current declare reduction construct initializer. void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer, VarDecl *OmpPrivParm); /// Called at the end of '#pragma omp declare reduction'. DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd( Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid); /// Check variable declaration in 'omp declare mapper' construct. TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D); /// Check if the specified type is allowed to be used in 'omp declare /// mapper' construct. QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc, TypeResult ParsedType); /// Called on start of '#pragma omp declare mapper'. OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart( Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType, SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS, Decl *PrevDeclInScope = nullptr); /// Build the mapper variable of '#pragma omp declare mapper'. void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD, Scope *S, QualType MapperType, SourceLocation StartLoc, DeclarationName VN); /// Called at the end of '#pragma omp declare mapper'. DeclGroupPtrTy ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S, ArrayRef<OMPClause *> ClauseList); /// Called on the start of target region i.e. 
/// '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                  const DeclarationNameInfo &Id,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                      SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
  return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
         isInOpenMPTargetExecutionDirective();
}

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses); StmtResult ActOnOpenMPExecutableDirective( OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); using VarsWithInheritedDSAType = llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>; /// Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskyield'. StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskwait'. 
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp taskgroup'. StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target data' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target enter data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target exit data' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp target parallel' after /// parsing of the associated statement. 
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp cancellation point'. StmtResult ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp cancel'. StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, OpenMPDirectiveKind CancelRegion); /// Called on well-formed '\#pragma omp taskloop' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp taskloop simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTaskLoopSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target update'. 
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc, Stmt *AStmt); /// Called on well-formed '\#pragma omp distribute parallel for' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target simd' after parsing of /// the associated statement. StmtResult ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute' after parsing of /// the associated statement. 
StmtResult ActOnOpenMPTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for simd' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp teams distribute parallel for' /// after parsing of the associated statement. StmtResult ActOnOpenMPTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed '\#pragma omp target teams distribute' after parsing /// of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for' /// after parsing of the associated statement. 
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute parallel for /// simd' after parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Called on well-formed '\#pragma omp target teams distribute simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA); /// Checks correctness of linear modifiers. bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind, SourceLocation LinLoc); /// Checks that the specified declaration matches requirements for the linear /// decls. bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc, OpenMPLinearClauseKind LinKind, QualType Type); /// Called on well-formed '\#pragma omp declare simd' after parsing of /// the associated method/function. DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective( DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS, Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds, ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears, ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. 
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. 
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. 
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind, OpenMPLinearClauseKind LinKind, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation DepLinMapLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. 
OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. 
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. 
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId,
                    const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
    ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
    DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true iff \p CCK names one of the explicit cast kinds
/// (C-style, functional, or "other" cast), as opposed to an implicit
/// conversion or a builtin-overloaded-operator operand conversion.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. 
In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. 
The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. 
/// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. 
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. 
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. 
ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. 
void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The processed form of a condition expression (for 'if', 'while', 'for',
/// 'do', 'switch', or 'if constexpr'), carrying the condition itself, any
/// condition variable it declares, and — when it could be evaluated up
/// front — its known constant value.
class ConditionResult {
  Decl *ConditionVar;
  FullExprArg Condition;
  bool Invalid;
  // Set only when built with IsConstexpr for a non-value-dependent
  // condition; gates KnownValue below.
  bool HasKnownValue;
  bool KnownValue;

  friend class Sema;
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}
  bool isInvalid() const { return Invalid; }
  // Returns the condition variable (may be null) and the condition
  // expression itself.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }
  // The compile-time value of the condition, if one was computed
  // (constexpr, non-value-dependent); None otherwise.
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
                               Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  // If true, suppress all diagnostics from this diagnoser.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  /// Diagnose an expression that is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                              SourceRange SR) = 0;
  /// Diagnose an expression that had to be constant-folded to a value.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
// Nesting depth of the pragma described below; nonzero means every function
// encountered is forced to be __host__ __device__.
unsigned ForceCUDAHostDeviceDepth = 0;

public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();

/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();

/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
               /* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
                                               SourceLocation>>
    DeviceCallGraph;

/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
/// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. 
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
    Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
    SourceLocation OrigLoc,
    const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
///   the device, creates a diagnostic which is emitted if and when we realize
///   that the function will be codegen'ed.
///
/// Example usage:
///
///  // Variable-length arrays are not allowed in CUDA device code.
///  if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
///    return ExprError();
///  // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
///   function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
///   for the device, creates a diagnostic which is emitted if and when we
///   realize that the function will be codegen'ed.
/// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
                                              const FunctionDecl *Callee);

/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
                       const FunctionDecl *Callee) {
  return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}

/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
                                 const LookupResult &Previous);

public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
///   (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
///   it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
///   be emitted if and when the caller is codegen'ed, and returns true.
///
///   Will only create deferred diagnostics for a given SourceLocation once,
///   so you can safely call this multiple times without generating duplicate
///   deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);

/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);

/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
    const FunctionDecl *Caller,
    SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);

/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS, bool Diagnose);

/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);

// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);

/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
                             const LookupResult &Previous);

/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD,
                            const FunctionTemplateDecl &TD);

/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;

/// \name Code completion
//@{

/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// Code completion occurs within an expression.
  PCC_Expression,
  /// Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// Code completion occurs where only a type is permitted.
  PCC_Type,
  /// Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
                            bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
                                     SourceLocation OpLoc, bool IsArrow,
                                     bool IsBaseExprStatement,
                                     QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
                                   QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
                                    const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);

/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
                                  SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
                                         SourceLocation Loc,
                                         ArrayRef<Expr *> Args,
                                         SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
                                            CXXScopeSpec SS,
                                            ParsedType TemplateTypeTy,
                                            ArrayRef<Expr *> ArgExprs,
                                            IdentifierInfo *II,
                                            SourceLocation OpenParLoc);

void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext, QualType BaseType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
    Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers);

void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);

void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                 bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression,
                                  bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AtArgumentExpression,
                                     ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
                                   DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
                              ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
    ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
                                SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName,
                                       SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
                                            IdentifierInfo *ClassName,
                                            SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                            IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
                                ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod,
                                        bool AtParameterName,
                                        ParsedType ReturnType,
                                        ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S,
                                          IdentifierInfo &ClassName,
                                          SourceLocation ClassNameLoc,
                                          bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro,
                                           MacroInfo *MacroInfo,
                                           unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(
    CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo,
    SmallVectorImpl<CodeCompletionResult> &Results);
//@}

//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system

public:
SourceLocation
getLocationOfStringLiteralByte(const StringLiteral *SL,
                               unsigned ByteNo) const;

private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                      const ArraySubscriptExpr *ASE = nullptr,
                      bool AllowOnePastEnd = true, bool IndexNegated = false);
void CheckArrayAccess(const Expr *E);

// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;
  unsigned FirstDataArg;
  bool HasVAListArg;
};

static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                                FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                       const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                         ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                      const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args,
                          const FunctionProtoType *Proto, SourceLocation Loc);

void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
               const Expr *ThisArg, ArrayRef<const Expr *> Args,
               bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
               VariadicCallType CallType);

bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);

ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
                                    CallExpr *TheCall);

void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);

// Target-specific builtin call checkers.
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                  unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool
CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
                                       CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);

public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
                                                  bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
                                 int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
                                    unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID,
                              CallExpr *TheCall, int ArgNum,
                              unsigned ExpectedFieldNum, bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);

public:
/// The kind of format string being checked (printf family, scanf, etc.).
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_OSLog,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

bool FormatStringHasSArg(const StringLiteral *FExpr);

static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args, bool IsCXXMember,
                          VariadicCallType CallType, SourceLocation Loc,
                          SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg,
                          unsigned format_idx, unsigned firstDataArg,
                          FormatStringType Type, VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);

void CheckMemaccessArguments(const CallExpr *Call, unsigned BId,
                             IdentifierInfo *FnName);

void CheckStrlcpycatArguments(const CallExpr *Call,
                              IdentifierInfo *FnName);

void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName);

void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                        SourceLocation ReturnLoc, bool isObjCMethod = false,
                        const AttrVec *Attrs = nullptr,
                        const FunctionDecl *FD = nullptr);

public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);

private:
void CheckImplicitConversions(Expr *E,
                              SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);

/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
                                DeclarationName FieldName,
                                const CXXRecordDecl *RD,
                                bool DeclIsField = true);

/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);

/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);

void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                               bool DeleteWasArrayForm);

public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                uint64_t MagicValue, QualType Type,
                                bool LayoutCompatible, bool MustBeNull);

/// Type information associated with a registered type-tag magic value.
struct TypeTagData {
  TypeTagData() {}

  TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
      : Type(Type), LayoutCompatible(LayoutCompatible),
        MustBeNull(MustBeNull) {}

  QualType Type;

  /// If true, \c Type should be compared with other expression's types for
  /// layout-compatibility.
  unsigned LayoutCompatible : 1;
  unsigned MustBeNull : 1;
};

/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
    TypeTagForDatatypeMagicValues;

/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
                              const ArrayRef<const Expr *> ExprArgs,
                              SourceLocation CallSiteLoc);

/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);

/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;

// Lazily-resolved identifiers; mutable so const accessors can cache them.
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;

/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;

IdentifierInfo *Ident_NSError = nullptr;

/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;

protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;

public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;

/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();

/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }

void incrementMSManglingNumber() const {
  return CurScope->incrementMSManglingNumber();
}

IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;

Decl *getObjCDeclContext() const;

DeclContext *getCurLexicalContext() const {
  return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}

const DeclContext *getCurObjCLexicalContext() const {
  const DeclContext *DC = getCurLexicalContext();
  // A category implicitly has the attribute of the interface.
  if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
    DC = CatD->getClassInterface();
  return DC;
}

/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // We check whether we're just after a comma in code-completion.
  if (NumArgs > 0 && PartialOverloading)
    return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
  return NumArgs > NumParams;
}

// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;

private:
// RAII helper that stashes the pending delayed-parse state (exception-spec
// checks and DLL export classes) on construction and restores it on
// destruction, asserting that no new pending work leaked in between.
class SavePendingParsedClassStateRAII {
public:
  SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }

  ~SavePendingParsedClassStateRAII() {
    assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
           "there shouldn't be any pending delayed exception spec checks");
    assert(S.DelayedDllExportClasses.empty() &&
           "there shouldn't be any pending delayed DLL export classes");
    swapSavedState();
  }

private:
  Sema &S;
  decltype(DelayedOverridingExceptionSpecChecks)
      SavedOverridingExceptionSpecChecks;
  decltype(DelayedEquivalentExceptionSpecChecks)
      SavedEquivalentExceptionSpecChecks;
  decltype(DelayedDllExportClasses) SavedDllExportClasses;

  /// Swaps the saved state with the Sema's current pending state.
  void swapSavedState() {
    SavedOverridingExceptionSpecChecks.swap(
        S.DelayedOverridingExceptionSpecChecks);
    SavedEquivalentExceptionSpecChecks.swap(
        S.DelayedEquivalentExceptionSpecChecks);
    SavedDllExportClasses.swap(S.DelayedDllExportClasses);
  }
};

/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  // Equality considers only the expression, not the record/member/alignment.
  bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};

/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;

/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; }; /// RAII object that enters a new expression evaluation context. 
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops what the constructor pushed.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  // Variant that reuses the lambda context decl of the enclosing record.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  // Variant for braced-init-lists; only enters a context in the
  // unevaluated-operand / C++11 case described below.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
declare-target-2.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only DejaGnu test for the OpenMP "declare target" directive:
   exercises invalid and valid combinations of the to/link clauses.
   The dg-error annotations are the expected diagnostics and must stay on
   the same line as the construct that triggers them.  */

extern int a;
#pragma omp declare target
#pragma omp declare target to (a) /* { dg-error "with clauses in between" } */
#pragma omp end declare target
int b;
#pragma omp declare target to (b) link (b) /* { dg-error "appears more than once on the same .declare target. directive" } */
int c;
#pragma omp declare target (c)
#pragma omp declare target link (c) /* { dg-error "specified both in declare target" } */
int foo (void);
#pragma omp declare target link (foo) /* { dg-error "is not a variable in clause" } */

/* Incomplete / non-mappable types must be rejected by to and link.  */
struct S;
extern struct S d[]; /* { dg-error "array type has incomplete element type" "" { target c } } */
#pragma omp declare target to (d) /* { dg-error "does not have a mappable type in" } */
extern struct S e;
#pragma omp declare target link (e) /* { dg-error "does not have a mappable type in" } */
extern int f[];
#pragma omp declare target to (f) /* { dg-error "does not have a mappable type in" } */

/* threadprivate variables cannot also be declare target.  */
int g, h;
#pragma omp threadprivate (g, h)
#pragma omp declare target to (g) /* { dg-error "is threadprivate variable in" } */
#pragma omp declare target link (h) /* { dg-error "is threadprivate variable in" } */
int j[10];
#pragma omp declare target to (j[0:4]) /* { dg-error "expected" } */

/* Valid combinations: repeating a to clause on separate directives is OK.  */
int k, l;
#pragma omp declare target
int m;
#pragma omp end declare target
#pragma omp declare target to (k)
#pragma omp declare target (k)
#pragma omp declare target to (k, m) link (l)
#pragma omp declare target link (l)

/* Duplicate list items within one directive are errors.  */
int n, o, s, t;
#pragma omp declare target to (n) to (n) /* { dg-error "appears more than once on the same .declare target. directive" } */
#pragma omp declare target link (o, o) /* { dg-error "appears more than once on the same .declare target. directive" } */
#pragma omp declare target (s, t, s) /* { dg-error "appears more than once on the same .declare target. directive" } */

/* Malformed clause syntax.  */
int p, q, r;
#pragma omp declare target (p) to (q) /* { dg-error "expected end of line before .to." } */
#pragma omp declare target to (p) (q) link (r) /* { dg-error "expected .#pragma omp. clause before" } */
#pragma omp declare target link (r) (p) /* { dg-error "expected .#pragma omp. clause before" } */
#pragma omp declare target
#pragma omp end declare target to (p) /* { dg-error "expected end of line before .to." } */
convolutiondepthwise_3x3_pack8_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 convolution, stride 1, int8 input with NEON intrinsics.
// Each element of bottom_blob/top_blob holds 8 packed int8 channel values
// (the *_pack8 layout implied by the 8-lane loads and *8 pointer strides —
// NOTE(review): elempack==8 assumed, confirm against caller).
// The kernel Mat stores 9 taps of 8 int8 values per group (72 bytes/row).
// Output accumulators are int32, written 8 per output pixel.
// Assumes the caller padded bottom_blob so that rows r0..r3 are readable
// and outh/outw match the 3x3/stride-1 geometry — TODO confirm.
static void convdw3x3s1_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        // Two output rows are produced per iteration of the outer i loop.
        int* outptr0 = out.row<int>(0);
        int* outptr1 = out.row<int>(1);

        const Mat img0 = bottom_blob.channel(g);

        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);
        const signed char* r3 = img0.row<const signed char>(3);

        // Load the nine 8-lane kernel taps _kRC (row R, column C) once per group.
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        // Main path: 2 output rows x 2 output columns per iteration,
        // reading 4 input rows (r0..r3).
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                // _rXY = input row X, columns Y and Y+1 (two packed pixels per q-register).
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);
                int8x16_t _r3031 = vld1q_s8(r3);
                int8x16_t _r3233 = vld1q_s8(r3 + 16);

                // _sNM: 16-bit partial products for output N (0..3 = the four
                // row/column positions of the 2x2 tile), partial M.
                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                int16x8_t _s20 = vmull_s8(vget_low_s8(_r1011), _k00);
                int16x8_t _s21 = vmull_s8(vget_high_s8(_r1011), _k01);
                int16x8_t _s22 = vmull_s8(vget_low_s8(_r1213), _k02);
                int16x8_t _s23 = vmull_s8(vget_low_s8(_r2021), _k10);
                int16x8_t _s30 = vmull_s8(vget_high_s8(_r1011), _k00);
                int16x8_t _s31 = vmull_s8(vget_low_s8(_r1213), _k01);
                int16x8_t _s32 = vmull_s8(vget_high_s8(_r1213), _k02);
                int16x8_t _s33 = vmull_s8(vget_high_s8(_r1213), _k10);

                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);
                _s20 = vmlal_s8(_s20, vget_high_s8(_r2021), _k11);
                _s21 = vmlal_s8(_s21, vget_low_s8(_r2223), _k12);
                _s22 = vmlal_s8(_s22, vget_low_s8(_r3031), _k20);
                _s23 = vmlal_s8(_s23, vget_high_s8(_r3031), _k21);
                _s30 = vmlal_s8(_s30, vget_low_s8(_r2223), _k11);
                _s31 = vmlal_s8(_s31, vget_high_s8(_r2223), _k12);
                _s32 = vmlal_s8(_s32, vget_high_s8(_r3031), _k20);
                _s33 = vmlal_s8(_s33, vget_low_s8(_r3233), _k21);

                // Last tap (_k22) kept in separate accumulators to avoid
                // overflowing the 16-bit partials before widening.
                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);
                int16x8_t _s28 = vmull_s8(vget_low_s8(_r3233), _k22);
                int16x8_t _s38 = vmull_s8(vget_high_s8(_r3233), _k22);

                // Widen the 16-bit partials to 32-bit and reduce.
                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                int32x4_t _sum20 = vaddl_s16(vget_low_s16(_s20), vget_low_s16(_s21));
                int32x4_t _sum21 = vaddl_s16(vget_high_s16(_s20), vget_high_s16(_s21));
                int32x4_t _sum22 = vaddl_s16(vget_low_s16(_s22), vget_low_s16(_s23));
                int32x4_t _sum23 = vaddl_s16(vget_high_s16(_s22), vget_high_s16(_s23));
                int32x4_t _sum30 = vaddl_s16(vget_low_s16(_s30), vget_low_s16(_s31));
                int32x4_t _sum31 = vaddl_s16(vget_high_s16(_s30), vget_high_s16(_s31));
                int32x4_t _sum32 = vaddl_s16(vget_low_s16(_s32), vget_low_s16(_s33));
                int32x4_t _sum33 = vaddl_s16(vget_high_s16(_s32), vget_high_s16(_s33));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum20 = vaddw_s16(_sum20, vget_low_s16(_s28));
                _sum21 = vaddw_s16(_sum21, vget_high_s16(_s28));
                _sum30 = vaddw_s16(_sum30, vget_low_s16(_s38));
                _sum31 = vaddw_s16(_sum31, vget_high_s16(_s38));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);
                _sum20 = vaddq_s32(_sum20, _sum22);
                _sum21 = vaddq_s32(_sum21, _sum23);
                _sum30 = vaddq_s32(_sum30, _sum32);
                _sum31 = vaddq_s32(_sum31, _sum33);

                // Store 2 pixels (16 ints) per output row.
                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);
                vst1q_s32(outptr1, _sum20);
                vst1q_s32(outptr1 + 4, _sum21);
                vst1q_s32(outptr1 + 8, _sum30);
                vst1q_s32(outptr1 + 12, _sum31);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                outptr0 += 16;
                outptr1 += 16;
            }
            // Column tail: one output column for both output rows.
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r30 = vld1_s8(r3);
                int8x8_t _r31 = vld1_s8(r3 + 8);
                int8x8_t _r32 = vld1_s8(r3 + 16);

                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r10, _k00);
                int16x8_t _s11 = vmull_s8(_r11, _k01);
                int16x8_t _s12 = vmull_s8(_r12, _k02);
                int16x8_t _s13 = vmull_s8(_r20, _k10);

                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r21, _k11);
                _s11 = vmlal_s8(_s11, _r22, _k12);
                _s12 = vmlal_s8(_s12, _r30, _k20);
                _s13 = vmlal_s8(_s13, _r31, _k21);

                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r32, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr1, _sum10);
                vst1q_s32(outptr1 + 4, _sum11);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr0 += 8;
                outptr1 += 8;
            }

            // Skip the 2 padding pixels at the row end plus one whole extra
            // input row (since two rows were consumed).
            r0 += 2 * 8 + w * 8;
            r1 += 2 * 8 + w * 8;
            r2 += 2 * 8 + w * 8;
            r3 += 2 * 8 + w * 8;

            outptr0 += outw * 8;
            outptr1 += outw * 8;
        }
        // Row tail: a single remaining output row (reads r0..r2 only).
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);

                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);

                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);

                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));

                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));

                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }

            // Skip the 2 padding pixels at the end of the row.
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}

// Depthwise 3x3 convolution, stride 2, int8 input with NEON intrinsics.
// Same pack8 layout as the stride-1 variant above; one output row per
// iteration, consuming input rows r0..r2 and advancing input pointers by
// two pixels per output pixel.
static void convdw3x3s2_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // Bytes to skip at the end of each output row: the unconsumed tail of
    // the current input row plus one whole input row (stride 2).
    const int tailstep = (w - 2 * outw + w) * 8;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        int* outptr0 = out;

        const Mat img0 = bottom_blob.channel(g);

        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);

        // Nine 8-lane kernel taps, as in the stride-1 variant.
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // Two output columns per iteration; 5 input pixels per row are
            // needed because of the stride-2 window overlap.
            for (; j + 1 < outw; j += 2)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r03 = vld1_s8(r0 + 24);
                int8x8_t _r04 = vld1_s8(r0 + 32);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r13 = vld1_s8(r1 + 24);
                int8x8_t _r14 = vld1_s8(r1 + 32);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r23 = vld1_s8(r2 + 24);
                int8x8_t _r24 = vld1_s8(r2 + 32);

                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r02, _k00);
                int16x8_t _s11 = vmull_s8(_r03, _k01);
                int16x8_t _s12 = vmull_s8(_r04, _k02);
                int16x8_t _s13 = vmull_s8(_r12, _k10);

                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r13, _k11);
                _s11 = vmlal_s8(_s11, _r14, _k12);
                _s12 = vmlal_s8(_s12, _r22, _k20);
                _s13 = vmlal_s8(_s13, _r23, _k21);

                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r24, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 32;
                r1 += 32;
                r2 += 32;
                outptr0 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));

                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));

                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 8;
            }

            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
DRB004-antidep2-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Two nested loops with loop-carried anti-dependence on the outer level.
This is a variable-length array version in C99.
Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18 */

#include <stdlib.h>
#include <stdio.h>

/* DataRaceBench kernel: the anti-dependence benchmark below is INTENTIONAL
   test input for race detectors; do not "fix" the parallelization.
   NOTE(review): in this variant the anti-dependent loop over i is serial
   and only the inner j loop is parallel, so the documented race pair may
   not actually race here — confirm against the upstream DRB004 source.  */
int main(int argc, char *argv[])
{
  int i, j;
  int len = 20;
  /* Optional first argument overrides the array dimension. */
  if (argc > 1)
    len = atoi(argv[1]);
  double a[len][len]; /* C99 variable-length array */

  /* Initialize every element to 0.5. */
#pragma omp parallel for private(j)
  for (i = 0; i < len; i++)
#pragma omp parallel for simd
    for (j = 0; j < len; j++)
      a[i][j] = 0.5;

  /* Loop-carried anti-dependence across i: iteration i reads row i+1. */
  for (i = 0; i < len - 1; i += 1)
  {
#pragma omp parallel for simd
    for (j = 0; j < len; j += 1)
    {
      a[i][j] += a[i + 1][j];
    }
  }

  /* Print the result; ordered keeps output deterministic. */
#pragma omp parallel for private(j) ordered
  for (i = 0; i < len; i++)
#pragma omp parallel for simd ordered
    for (j = 0; j < len; j++)
#pragma omp ordered simd
      printf("%lf\n", a[i][j]);

  return 0;
}
GB_unaryop__abs_uint64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_uint64_uint8
// op(A') function:  GB_tran__abs_uint64_uint8

// C type:   uint64_t
// A type:   uint8_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij
// (ABS of an unsigned value is the identity, hence cij = aij.)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint64_t z = (uint64_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise over the anz entries of Ax into Cx,
// parallelized over nthreads OpenMP threads.
GrB_Info GB_unop__abs_uint64_uint8
(
    uint64_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above.
GrB_Info GB_tran__abs_uint64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__lnot_fp64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_fp64_bool
// op(A') function:  GB_tran__lnot_fp64_bool

// C type:   double
// A type:   bool
// cast:     double cij = (double) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_FP64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the operator elementwise over the anz entries of Ax into Cx,
// parallelized over nthreads OpenMP threads.
GrB_Info GB_unop__lnot_fp64_bool
(
    double *restrict Cx,
    const bool *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above.
GrB_Info GB_tran__lnot_fp64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
openmp_eigen_dynamic.c
#include <stdlib.h> #include <stdio.h> #include <mkl.h> #include <omp.h> #include <sys/time.h> /* This program demonstrates the OpenMP parallelization of a computationally intensive loop where the work per iteration is allowed to vary. Within the main loop, matrix is generated, the eigenvalue solver DYSEV is called and the largest eigenvalue is saved. DYSEV documentation can be found here: http://www.netlib.org/lapack/explore-html/dd/d4c/dsyev_8f.html Note that this is not necessarily the preferred way to calculate eigenvalues and was used purely as a time consuming example for which the work per iteration could be easily varied. To compile using Intel C++ compiler and linking MKL routine icpc openmp_eigen_dynamic.c -mkl -openmp To run export OMP_NUM_THREADS=N; ./a.out X Y Z where N = number of OpenMP threads X = dimension of array Y = number of iterations (number of eigenvalue problems solved) Z = 'E' for even amount of work per iteration 'U' for uneven amount of work per iteration */ int main(int argc, char **argv) { char choice; int n, niter; double elapsed, *eigmax; struct timeval tv_start, tv_end; // Make sure we use serial version of Intel MKL routine mkl_set_num_threads(1); // Process command line arguments if (argc < 4) { printf("\nThree command line arguments required\n"); printf(" Dimension of array\n"); printf(" Number of iterations\n"); printf(" Choice: 'E' for even / 'U' for uneven work per iteration\n\n"); return(0); } n = atoi(argv[1]); niter = atoi(argv[2]); choice = argv[3][0]; if (choice != 'E' && choice != 'U') { printf("\nThird argument must be 'E' or 'U' for even or uneven\n"); printf("work per iteration, respectively\n\n"); return(0); } // Allocate vector to store results eigmax = (double *) malloc(niter * sizeof(double)); // Solve eigenvalue problem for "niter" random matrices and print largest eigenvector // Get timestamp at start of loop gettimeofday(&tv_start, NULL); #pragma omp parallel for schedule(dynamic, 5) for (int j = 0; j < niter; 
j++) { int m, lda, info, lwork; double wkopt, *a, *w, *work; // Define the problem size. If choice set to uneven, allow problem // to grow for later iterations if (choice == 'E') { m = n; } else { m = n + j/5; } // Setup work space lda = m; lwork = -1; dsyev("Vectors", "Upper", &m, a, &lda, w, &wkopt, &lwork, &info); lwork = (int)wkopt; // Allocate arrays a = (double *) malloc(m * m * sizeof(double)); w = (double *) malloc(m * sizeof(double)); work = (double*)malloc( lwork*sizeof(double) ); // Initialize array for eigenvalue problem for (int i=0; i< m*m; i++) { a[i] = (double) ((i+j)%17) / (2.0 + j); } // Calculate eigenvalues and save the largest value dsyev("Vectors", "Upper", &m, a, &lda, w, work, &lwork, &info); eigmax[j] = w[m-1]; // Free memory free(a); free(w); free(work); } // Get timestamp at end of loop gettimeofday(&tv_end, NULL); // Calculate elapsed time elapsed = (tv_end.tv_sec - tv_start.tv_sec) + (tv_end.tv_usec - tv_start.tv_usec) / 1000000.0; printf("array dimension = %d\n", n); printf("number of iterations = %d\n", niter); printf("wall time = %f\n", elapsed); // Following code is included to prevent compiler from optimizing // away the eigenvalue calculations. Provides the possibility that the // results will be used. if (choice == 'A') { for (int j = 0; j < niter; j++) { printf("%f\n", eigmax[j]); } } free(eigmax); }
par_gsmg.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Geometrically smooth interpolation multigrid
 *
 *****************************************************************************/

#include <stdio.h>
#include <math.h>

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"

#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif

#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif

/* 2-norm of x (local helper; avoids a BLAS dependency) */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real temp = 0.;
   HYPRE_Int i;

   for (i=0; i<n; i++)
      temp = temp + x[i]*x[i];
   return sqrt(temp);
}

/* x := a*x (local helper; scalar scaling of a vector) */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int i;

   for (i=0; i<n; i++)
      x[i] = a * x[i];
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixFillSmooth
 * - fill in smooth matrix
 * - this function will scale the smooth vectors
 *
 * For each off-diagonal entry (i,ii) of S (same sparsity as A), stores
 * 1 / sum_k |v_k[i] - v_k[ii]| over the nsamples smooth vectors v_k --
 * large where the smooth vectors agree at i and ii.  Entries are zeroed
 * when the matching A entry is an explicit zero or (num_functions > 1)
 * when i and ii belong to different functions.
 * Side effect: each sample vector is normalized and divided by nsamples.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S,
                             hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions,
                             HYPRE_Int *dof_func)
{
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;
   HYPRE_Real *p_offd;
   HYPRE_Real *p_ptr;
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif

   /* normalize each sample vector and divide by number of samples */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }

   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);

   /* exchange sample values needed by neighboring ranks: p_offd will hold,
      for each sample, the values at off-processor columns */
   buf_data = hypre_CTAlloc(HYPRE_Real,
                            hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                            HYPRE_MEMORY_HOST);

   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd; /* keep the base pointer for later traversal and freeing */

   p = samples;
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++] = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
                                                  p_offd);

      hypre_ParCSRCommHandleDestroy(comm_handle);

      /* advance to the next sample vector / its off-processor slot */
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }

   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);

   /* communicate the function assignment of off-processor columns */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);

      int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                   hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                   HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }

   for (i = 0; i < n; i++)
   {
      /* diag part: j starts at S_diag_i[i]+1, skipping the diagonal entry */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }
      /* offd part: same measure, but the neighbor value comes from the
         communicated p_offd arrays */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];

         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }

         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }

         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }

         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif

   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);

   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);

   return 0;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixChooseThresh
 *
 * Returns the global minimum (over all rows on all ranks) of each row's
 * maximum entry in S -- used by callers as a baseline threshold.
 *--------------------------------------------------------------------------*/

HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j;
   HYPRE_Real mx, minimax = 1.e+10;
   HYPRE_Real minmin;

   for (i=0; i<n; i++)
   {
      mx = 0.;
      for (j=S_diag_i[i]; j<S_diag_i[i+1]; j++)
         mx = hypre_max(mx, S_diag_data[j]);
      for (j=S_offd_i[i]; j<S_offd_i[i+1]; j++)
         mx = hypre_max(mx, S_offd_data[j]);

      /* rows whose entries were all zeroed do not contribute */
      if (mx != 0.)
         minimax = hypre_min(minimax, mx);
   }

   /* global reduction: smallest row-max across all ranks */
   hypre_MPI_Allreduce(&minimax, &minmin, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm);

   return minmin;
}

/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixThreshold
 *
 * Compresses A in place: drops every entry smaller than thresh and
 * replaces A's diag/offd i/j/data arrays with newly allocated compacted
 * copies (the old arrays are freed here).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_nonzeros_diag = A_diag_i[n];
   HYPRE_Int num_nonzeros_offd = A_offd_i[n];
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   HYPRE_Int *S_offd_i;
   HYPRE_Int *S_offd_j;
   HYPRE_Real *S_offd_data;
   HYPRE_Int count, i, jS, jA;

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_diag; i++)
      if (A_diag_data[i] >= thresh)
         count++;

   /* allocate vectors */
   S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   /* compact pass: copy entries >= thresh, rebuilding the row pointers */
   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (A_diag_data[jA] >= thresh)
         {
            S_diag_data[jS] = A_diag_data[jA];
            S_diag_j[jS] = A_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_diag) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_diag) = S_diag_i;
   hypre_CSRMatrixJ(A_diag) = S_diag_j;
   hypre_CSRMatrixData(A_diag) = S_diag_data;

   /*
    * Offd part
    */

   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_offd; i++)
      if (A_offd_data[i] >= thresh)
         count++;

   /* allocate vectors */
   S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);

   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (A_offd_data[jA] >= thresh)
         {
            S_offd_data[jS] = A_offd_data[jA];
            S_offd_j[jS] = A_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_offd) = jS;

   /* free the vectors we don't need */
   hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);

   /* assign the new vectors */
   hypre_CSRMatrixI(A_offd) = S_offd_i;
   hypre_CSRMatrixJ(A_offd) = S_offd_j;
   hypre_CSRMatrixData(A_offd) = S_offd_data;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothVecs
 * - smoother depends on the level being used
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int num_sweeps,
                                HYPRE_Int level,
                                HYPRE_Real **SmoothVecs_p)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_ParVector *Zero;
   hypre_ParVector *Temp;
   hypre_ParVector *U;

   hypre_ParVector *Qtemp = NULL;

   HYPRE_Int i;
   HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);

   HYPRE_Int sample;
   HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int ret;
   HYPRE_Real *datax, *bp, *p;

   HYPRE_Int rlx_type;
   HYPRE_Int smooth_type;
   HYPRE_Int smooth_option = 0;
   HYPRE_Int smooth_num_levels;
   HYPRE_Solver *smoother;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int num_threads;

   num_threads = hypre_NumThreads();

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }

   if (debug_flag >= 1)
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n",
                   num_sweeps, nsamples);

   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   /* if a complex smoother is configured for this level, it overrides plain
      relaxation and its own sweep count takes precedence */
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }

   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
   /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
   /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */

   /* generate par vectors */

   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Zero,0);
   hypre_ParVectorInitialize(Zero);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;

   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Temp,0);
   hypre_ParVectorInitialize(Temp);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;

   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(U,0);
   hypre_ParVectorInitialize(U);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(U));

   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp,0);
   }

   /* allocate space for the vectors */
   bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
   p = bp;

   /* generate random vectors */
   /* each sample starts from a random vector and is smoothed num_sweeps
      times against a zero right-hand side, leaving the algebraically
      smooth error components in U */
   for (sample=0; sample<nsamples; sample++)
   {
      for (i=0; i<n_local; i++)
         datax[i] = hypre_Rand() - .5;

      for (i=0; i<num_sweeps; i++)
      {
         if (smooth_option == 6)
         {
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp, Qtemp);
            hypre_assert(ret == 0);
         }
      }

      /* copy out the solution */
      for (i=0; i<n_local; i++)
         *p++ = datax[i];
   }

   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);

   if (num_threads > 1)
      hypre_ParVectorDestroy(Qtemp);

   /* ownership of bp passes to the caller, who must free it */
   *SmoothVecs_p = bp;

   return 0;
}

/*--------------------------------------------------------------------------
 * CreateSmoothDirs replaces CreateS in AMG
 * - smoother depends on the level being used
 * - in this version, CreateSmoothVecs must be called prior to this function
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   hypre_ParCSRMatrix *S;
   HYPRE_Real minimax;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);

   /* S starts as a structural clone of A (values not copied) */
   S = hypre_ParCSRMatrixClone(A, 0);

   /* Traverse S and fill in differences */
   hypre_ParCSRMatrixFillSmooth(
      hypre_ParAMGDataNumSamples(amg_data), SmoothVecs,
      S, A, num_functions, dof_func);

   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
      hypre_printf("Minimax chosen: %f\n", minimax);

   /* Threshold and compress */
   hypre_ParCSRMatrixThreshold(S, thresh*minimax);

   *S_ptr = S;

   return 0;
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGNormalizeVecs
 *
 * Normalize the smooth vectors and also make the first vector the constant
 * vector
 *
 * inputs:
 * n = length of smooth vectors
 * num = number of smooth vectors
 * V = smooth vectors (array of length n*num), also an output
 *
 * output:
 * V = adjusted smooth vectors
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V) { HYPRE_Int i, j; HYPRE_Real nrm; /* change first vector to the constant vector */ for (i=0; i<n; i++) V[i] = 1.0; for (j=0; j<num; j++) { nrm = mydnrm2(n, &V[j*n]); mydscal(n, 1./nrm, &V[j*n]); } return 0; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGFitVectors * * Construct interpolation weights based on fitting smooth vectors * * inputs: * ip = row number of row in P being processed (0-based) * n = length of smooth vectors * num = number of smooth vectors * V = smooth vectors (array of length n*num), also an output * nc = number of coarse grid points * ind = indices of coarse grid points (0-based) * * output: * val = interpolation weights for the coarse grid points * V = smooth vectors; first one has been changed to constant vector; * vectors have also been normalized; this is also an input *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V, HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val) { HYPRE_Real *a, *b; HYPRE_Real *ap; HYPRE_Int i, j; HYPRE_Real *work; HYPRE_Int work_size; HYPRE_Int info; HYPRE_Int temp; /* hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc); for (i=0; i<nc; i++) hypre_printf("%d ", ind[i]); hypre_printf("\n"); */ if (nc == 0) return 0; work_size = 2000*64; work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST); a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST); ap = a; for (j=0; j<nc; j++) { for (i=0; i<num; i++) { *ap = V[i*n+ind[j]]; ap++; } } temp = MAX(nc, num); b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST); for (i=0; i<num; i++) b[i] = V[i*n+ip]; { char trans = 'N'; HYPRE_Int one = 1; hypre_dgels(&trans, &num, &nc, &one, a, &num, b, &temp, work, &work_size, 
&info); if (info != 0) hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n"); /* copy solution into output vector */ for (j=0; j<nc; j++) val[j] = b[j]; } hypre_TFree(b, HYPRE_MEMORY_HOST); hypre_TFree(a, HYPRE_MEMORY_HOST); hypre_TFree(work, HYPRE_MEMORY_HOST); return info; } /*--------------------------------------------------------------------------- * hypre_BoomerAMGBuildInterpLS * * Interpolation built from fitting smooth vectors * - sequential version only *--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, HYPRE_Int num_smooth, HYPRE_Real *SmoothVecs, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */ HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */ HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */ hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *tmp_map_offd; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; //HYPRE_Real *S_ext_data; //HYPRE_Int *S_ext_i; //HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size; 
   HYPRE_Int P_offd_size;

   HYPRE_Int *P_marker;
   /* HYPRE_Int *P_marker_offd; */

   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;
   /* HYPRE_Int jj_begin_row,jj_begin_row_offd;
   HYPRE_Int jj_end_row,jj_end_row_offd; */

   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */

   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);

   HYPRE_Int *fine_to_coarse;
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt my_first_cpt;

   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;

   HYPRE_Real one = 1.0;

   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;
   HYPRE_Int *int_buf_data;
   //HYPRE_BigInt *big_buf_data;

   HYPRE_Real wall_time; /* for debugging instrumentation */

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();

   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];

   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

   if (num_functions > 1 && num_cols_S_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);

   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }

   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int,
                                hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
                                HYPRE_MEMORY_HOST);

   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++] = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* int_buf_data is reused for the dof_func exchange below */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }

      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   if (num_procs > 1)
   {
      S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
      //S_ext_i = hypre_CSRMatrixI(S_ext);
      //S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/

   /*-----------------------------------------------------------------------
    * Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/

   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);

   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   /*-----------------------------------------------------------------------
    * Loop over fine grid.
    *-----------------------------------------------------------------------*/

   /* RDF: this looks a little tricky, but doable */
   /* counting pass: each thread handles a contiguous slice [ns,ne) of rows
      and counts its C-points (coarse_counter) and P_diag entries (jj_count);
      fine_to_coarse receives thread-local coarse indices that are shifted
      to process-local indices in a later pass */

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;

      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a C-point, interpolation is the identity. Also set up
          * mapping vector.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, interpolation is from the C-points that
          * strongly influence i.
          *--------------------------------------------------------------------*/

         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }

            if (num_procs > 1)
            {
               /* removed */
            }
         }
      }
   }

   /*-----------------------------------------------------------------------
    * Allocate arrays.
    *-----------------------------------------------------------------------*/

   /* prefix sums turn the per-thread counts into cumulative offsets */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];

   P_diag_size = jj_counter;

   P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);

   P_diag_i[n_fine] = jj_counter;

   P_offd_size = jj_counter_offd;

   P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);

   /*-----------------------------------------------------------------------
    * Intialize some stuff.
    *-----------------------------------------------------------------------*/

   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   /*-----------------------------------------------------------------------
    * Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/

   if (debug_flag==4) wall_time = time_getWallclockSeconds();

   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);

   big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);*/

   /* shift each thread's local coarse indices by the number of C-points
      counted by all preceding threads */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }

   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++] = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }

   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
                                               fine_to_coarse_offd);

   hypre_ParCSRCommHandleDestroy(comm_handle);

   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }

   if (debug_flag==4) wall_time = time_getWallclockSeconds();*/

   /*-----------------------------------------------------------------------
    * Loop over fine grid points.
    *-----------------------------------------------------------------------*/

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* resume this thread's slice at the offset counted in pass one */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];

      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          * If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/

         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter] = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }

         /*--------------------------------------------------------------------
          * If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/

         else
         {
            HYPRE_Int kk;
            HYPRE_Int indices[1000]; /* kludge */
            /* NOTE(review): fixed-size buffer; a row with more than 1000
               strong C-neighbors would overflow it -- confirm upper bound */

            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];

               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/

               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter] = fine_to_coarse[i1];
                  jj_counter++;
                  indices[kk] = i1;
                  kk++;
               }
            }

            /* least-squares fit of the smooth vectors produces the
               interpolation weights for this F-point's row */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);

            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   P_diag_i[i] = jj_counter; /* check that this is in right place for threads */
   /* NOTE(review): i here is the variable used inside the parallel loop
      above (private under OpenMP), so its value at this point is not
      well-defined when threaded -- the original comment already flags
      this spot; confirm intended behavior */

   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);

   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;

   /* Compress P, removing coefficients smaller than trunc_factor * Max */

   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      /* truncation may have reallocated the arrays; re-fetch them */
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }

   /* compress P's off-processor column space to only the columns that are
      actually referenced: sort, deduplicate, then renumber P_offd_j */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_marker[i] = P_offd_j[i];

      hypre_qsort0(P_marker, 0, P_offd_size-1);

      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i=1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }

      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);

      for (i=0; i < num_cols_P_offd; i++)
         tmp_map_offd[i] = P_marker[i];

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }

   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }

   hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);

   *P_ptr = P;

   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);

   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);

   return(0);
}

/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpGSMG
 *
 * Difference with hypre_BoomerAMGBuildInterp is that S contains values
 * and is used to build interpolation weights. Matrix A is not used.
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A, HYPRE_Int *CF_marker, hypre_ParCSRMatrix *S, HYPRE_BigInt *num_cpts_global, HYPRE_Int num_functions, HYPRE_Int *dof_func, HYPRE_Int debug_flag, HYPRE_Real trunc_factor, hypre_ParCSRMatrix **P_ptr) { MPI_Comm comm = hypre_ParCSRMatrixComm(S); hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S); hypre_ParCSRCommHandle *comm_handle; hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S); HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag); HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag); hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S); HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd); HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd); HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd); HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); HYPRE_Int *tmp_map_offd = NULL; hypre_ParCSRMatrix *P; HYPRE_BigInt *col_map_offd_P; HYPRE_Int *CF_marker_offd; HYPRE_Int *dof_func_offd = NULL; hypre_CSRMatrix *S_ext; HYPRE_Real *S_ext_data; HYPRE_Int *S_ext_i; HYPRE_BigInt *S_ext_j; hypre_CSRMatrix *P_diag; hypre_CSRMatrix *P_offd; HYPRE_Real *P_diag_data; HYPRE_Int *P_diag_i; HYPRE_Int *P_diag_j; HYPRE_Real *P_offd_data; HYPRE_Int *P_offd_i; HYPRE_Int *P_offd_j; HYPRE_Int P_diag_size, P_offd_size; HYPRE_Int *P_marker, *P_marker_offd; HYPRE_Int jj_counter,jj_counter_offd; HYPRE_Int *jj_count, *jj_count_offd; HYPRE_Int jj_begin_row,jj_begin_row_offd; HYPRE_Int jj_end_row,jj_end_row_offd; HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */ HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag); HYPRE_Int strong_f_marker; HYPRE_Int *fine_to_coarse; HYPRE_Int *coarse_counter; //HYPRE_Int coarse_shift; HYPRE_BigInt total_global_cpts; HYPRE_Int num_cols_P_offd; //HYPRE_BigInt my_first_cpt; HYPRE_BigInt big_i2; 
HYPRE_Int i,i1,i2; HYPRE_Int j,jl,jj,jj1; HYPRE_Int start; HYPRE_Int c_num; HYPRE_Real sum; HYPRE_Real distribute; HYPRE_Real zero = 0.0; HYPRE_Real one = 1.0; HYPRE_Int my_id; HYPRE_Int num_procs; HYPRE_Int num_threads; HYPRE_Int num_sends; HYPRE_Int index; HYPRE_Int ns, ne, size, rest; HYPRE_Int *int_buf_data; HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S); HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag); HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows; HYPRE_Real wall_time; /* for debugging instrumentation */ hypre_MPI_Comm_size(comm, &num_procs); hypre_MPI_Comm_rank(comm,&my_id); num_threads = hypre_NumThreads(); #ifdef HYPRE_NO_GLOBAL_PARTITION //my_first_cpt = num_cpts_global[0]; total_global_cpts = 0; /* we will set this later for the matrix in the setup */ /* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1]; hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/ #else //my_first_cpt = num_cpts_global[my_id]; total_global_cpts = num_cpts_global[num_procs]; #endif /*------------------------------------------------------------------- * Get the CF_marker data for the off-processor columns *-------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (num_functions > 1 && num_cols_S_offd) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); if (!comm_pkg) { hypre_MatvecCommPkgCreate(S); comm_pkg = hypre_ParCSRMatrixCommPkg(S); } num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends), HYPRE_MEMORY_HOST); index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = 
CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, CF_marker_offd); hypre_ParCSRCommHandleDestroy(comm_handle); if (num_functions > 1) { index = 0; for (i = 0; i < num_sends; i++) { start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i); for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++) int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)]; } comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data, dof_func_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n", my_id, wall_time); fflush(NULL); } /*---------------------------------------------------------------------- * Get the ghost rows of S *---------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); if (num_procs > 1) { S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1); S_ext_i = hypre_CSRMatrixI(S_ext); S_ext_j = hypre_CSRMatrixBigJ(S_ext); S_ext_data = hypre_CSRMatrixData(S_ext); } if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * First Pass: Determine size of P and fill in fine_to_coarse mapping. *-----------------------------------------------------------------------*/ /*----------------------------------------------------------------------- * Intialize counters and allocate mapping vector. 
*-----------------------------------------------------------------------*/ coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST); fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1; jj_counter = start_indexing; jj_counter_offd = start_indexing; /*----------------------------------------------------------------------- * Loop over fine grid. *-----------------------------------------------------------------------*/ /* RDF: this looks a little tricky, but doable */ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE #endif for (j = 0; j < num_threads; j++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (j < rest) { ns = j*size+j; ne = (j+1)*size+j+1; } else { ns = j*size+rest; ne = (j+1)*size+rest; } for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a C-point, interpolation is the identity. Also set up * mapping vector. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { jj_count[j]++; fine_to_coarse[i] = coarse_counter[j]; coarse_counter[j]++; } /*-------------------------------------------------------------------- * If i is an F-point, interpolation is from the C-points that * strongly influence i. 
*--------------------------------------------------------------------*/ else { for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; if (CF_marker[i1] >= 0) { jj_count[j]++; } } if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; if (CF_marker_offd[i1] >= 0) { jj_count_offd[j]++; } } } } } } /*----------------------------------------------------------------------- * Allocate arrays. *-----------------------------------------------------------------------*/ for (i=0; i < num_threads-1; i++) { coarse_counter[i+1] += coarse_counter[i]; jj_count[i+1] += jj_count[i]; jj_count_offd[i+1] += jj_count_offd[i]; } i = num_threads-1; jj_counter = jj_count[i]; jj_counter_offd = jj_count_offd[i]; P_diag_size = jj_counter; P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST); P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST); P_diag_i[n_fine] = jj_counter; P_offd_size = jj_counter_offd; P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST); P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST); /*----------------------------------------------------------------------- * Intialize some stuff. *-----------------------------------------------------------------------*/ jj_counter = start_indexing; jj_counter_offd = start_indexing; if (debug_flag==4) { wall_time = time_getWallclockSeconds() - wall_time; hypre_printf("Proc = %d Interp: Internal work 1 = %f\n", my_id, wall_time); fflush(NULL); } /*----------------------------------------------------------------------- * Send and receive fine_to_coarse info. 
*-----------------------------------------------------------------------*/ if (debug_flag==4) wall_time = time_getWallclockSeconds(); /*----------------------------------------------------------------------- * Loop over fine grid points. *-----------------------------------------------------------------------*/ #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE #endif for (jl = 0; jl < num_threads; jl++) { size = n_fine/num_threads; rest = n_fine - size*num_threads; if (jl < rest) { ns = jl*size+jl; ne = (jl+1)*size+jl+1; } else { ns = jl*size+rest; ne = (jl+1)*size+rest; } jj_counter = 0; if (jl > 0) jj_counter = jj_count[jl-1]; jj_counter_offd = 0; if (jl > 0) jj_counter_offd = jj_count_offd[jl-1]; P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST); for (i = 0; i < n_fine; i++) { P_marker[i] = -1; } for (i = 0; i < num_cols_S_offd; i++) { P_marker_offd[i] = -1; } strong_f_marker = -2; for (i = ns; i < ne; i++) { /*-------------------------------------------------------------------- * If i is a c-point, interpolation is the identity. *--------------------------------------------------------------------*/ if (CF_marker[i] >= 0) { P_diag_i[i] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i]; P_diag_data[jj_counter] = one; jj_counter++; } /*-------------------------------------------------------------------- * If i is an F-point, build interpolation. 
*--------------------------------------------------------------------*/ else { /* Diagonal part of P */ P_diag_i[i] = jj_counter; jj_begin_row = jj_counter; for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_diag_j * and initialize interpolation weight to zero. *--------------------------------------------------------------*/ if (CF_marker[i1] >= 0) { P_marker[i1] = jj_counter; P_diag_j[jj_counter] = fine_to_coarse[i1]; P_diag_data[jj_counter] = zero; jj_counter++; } /*-------------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *--------------------------------------------------------------*/ else { P_marker[i1] = strong_f_marker; } } jj_end_row = jj_counter; /* Off-Diagonal part of P */ P_offd_i[i] = jj_counter_offd; jj_begin_row_offd = jj_counter_offd; if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*----------------------------------------------------------- * If neighbor i1 is a C-point, set column number in P_offd_j * and initialize interpolation weight to zero. *-----------------------------------------------------------*/ if (CF_marker_offd[i1] >= 0) { P_marker_offd[i1] = jj_counter_offd; P_offd_j[jj_counter_offd] = i1; P_offd_data[jj_counter_offd] = zero; jj_counter_offd++; } /*----------------------------------------------------------- * If neighbor i1 is an F-point, mark it as a strong F-point * whose connection needs to be distributed. *-----------------------------------------------------------*/ else { P_marker_offd[i1] = strong_f_marker; } } } jj_end_row_offd = jj_counter_offd; /* Loop over ith row of S. 
First, the diagonal part of S */ for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++) { i1 = S_diag_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker[i1] >= jj_begin_row) { P_diag_data[P_marker[i1]] += S_diag_data[jj]; } /*-------------------------------------------------------------- * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *--------------------------------------------------------------*/ else if (P_marker[i1] == strong_f_marker) { sum = zero; /*----------------------------------------------------------- * Loop over row of S for point i1 and calculate the sum * of the connections to c-points that strongly influence i. *-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) sum += S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) sum += S_offd_data[jj1]; } } if (sum != 0) { distribute = S_diag_data[jj] / sum; /*----------------------------------------------------------- * Loop over row of S for point i1 and do the distribution. 
*-----------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++) { i2 = S_diag_j[jj1]; if (P_marker[i2] >= jj_begin_row) P_diag_data[P_marker[i2]] += distribute * S_diag_data[jj1]; } /* Off-Diagonal block part of row i1 */ if (num_procs > 1) { for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++) { i2 = S_offd_j[jj1]; if (P_marker_offd[i2] >= jj_begin_row_offd) P_offd_data[P_marker_offd[i2]] += distribute * S_offd_data[jj1]; } } } else { /* do nothing */ } } /*-------------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *--------------------------------------------------------------*/ else { /* do nothing */ } } /*---------------------------------------------------------------- * Still looping over ith row of S. Next, loop over the * off-diagonal part of S *---------------------------------------------------------------*/ if (num_procs > 1) { for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++) { i1 = S_offd_j[jj]; /*-------------------------------------------------------------- * Case 1: neighbor i1 is a C-point and strongly influences i, * accumulate a_{i,i1} into the interpolation weight. *--------------------------------------------------------------*/ if (P_marker_offd[i1] >= jj_begin_row_offd) { P_offd_data[P_marker_offd[i1]] += S_offd_data[jj]; } /*------------------------------------------------------------ * Case 2: neighbor i1 is an F-point and strongly influences i, * distribute a_{i,i1} to C-points that strongly infuence i. * Note: currently no distribution to the diagonal in this case. *-----------------------------------------------------------*/ else if (P_marker_offd[i1] == strong_f_marker) { sum = zero; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and calculate the sum * of the connections to c-points that strongly influence i. 
*---------------------------------------------------------*/ /* find row number */ c_num = S_offd_j[jj]; for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) { /* in the diagonal block */ if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) sum += S_ext_data[jj1]; } else { /* in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) sum += S_ext_data[jj1]; } } } if (sum != 0) { distribute = S_offd_data[jj] / sum; /*--------------------------------------------------------- * Loop over row of S_ext for point i1 and do * the distribution. *--------------------------------------------------------*/ /* Diagonal block part of row i1 */ for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++) { big_i2 = S_ext_j[jj1]; if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */ { if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row) P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]] += distribute * S_ext_data[jj1]; } else { /* check to see if it is in the off_diagonal block */ j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd); if (j != -1) { if (P_marker_offd[j] >= jj_begin_row_offd) P_offd_data[P_marker_offd[j]] += distribute * S_ext_data[jj1]; } } } } else { /* do nothing */ } } /*----------------------------------------------------------- * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1} * into the diagonal. *-----------------------------------------------------------*/ else { /* do nothing */ } } } /*----------------------------------------------------------------- * Set interpolation weight by dividing by the diagonal. 
*-----------------------------------------------------------------*/ sum = 0.; for (jj = jj_begin_row; jj < jj_end_row; jj++) sum += P_diag_data[jj]; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) sum += P_offd_data[jj]; for (jj = jj_begin_row; jj < jj_end_row; jj++) P_diag_data[jj] /= sum; for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++) P_offd_data[jj] /= sum; } strong_f_marker--; P_offd_i[i+1] = jj_counter_offd; } hypre_TFree(P_marker, HYPRE_MEMORY_HOST); hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); } P = hypre_ParCSRMatrixCreate(comm, hypre_ParCSRMatrixGlobalNumRows(S), total_global_cpts, hypre_ParCSRMatrixColStarts(S), num_cpts_global, 0, P_diag_i[n_fine], P_offd_i[n_fine]); P_diag = hypre_ParCSRMatrixDiag(P); hypre_CSRMatrixData(P_diag) = P_diag_data; hypre_CSRMatrixI(P_diag) = P_diag_i; hypre_CSRMatrixJ(P_diag) = P_diag_j; P_offd = hypre_ParCSRMatrixOffd(P); hypre_CSRMatrixData(P_offd) = P_offd_data; hypre_CSRMatrixI(P_offd) = P_offd_i; hypre_CSRMatrixJ(P_offd) = P_offd_j; hypre_ParCSRMatrixOwnsRowStarts(P) = 0; /* Compress P, removing coefficients smaller than trunc_factor * Max */ if (trunc_factor != 0.0) { hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0); P_diag_data = hypre_CSRMatrixData(P_diag); P_diag_i = hypre_CSRMatrixI(P_diag); P_diag_j = hypre_CSRMatrixJ(P_diag); P_offd_data = hypre_CSRMatrixData(P_offd); P_offd_i = hypre_CSRMatrixI(P_offd); P_offd_j = hypre_CSRMatrixJ(P_offd); P_diag_size = P_diag_i[n_fine]; P_offd_size = P_offd_i[n_fine]; } num_cols_P_offd = 0; if (P_offd_size) { P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST); #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_marker[i] = P_offd_j[i]; hypre_qsort0(P_marker, 0, P_offd_size-1); num_cols_P_offd = 1; index = P_marker[0]; for (i=1; i < P_offd_size; i++) { if (P_marker[i] > index) { index = P_marker[i]; P_marker[num_cols_P_offd++] = index; } } col_map_offd_P = 
hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST); tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST); for (i=0; i < num_cols_P_offd; i++) tmp_map_offd[i] = P_marker[i]; #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < P_offd_size; i++) P_offd_j[i] = hypre_BinarySearch(tmp_map_offd, P_offd_j[i], num_cols_P_offd); hypre_TFree(P_marker, HYPRE_MEMORY_HOST); } if (num_cols_P_offd) { hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P; hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd; } hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd); *P_ptr = P; hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST); hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST); hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST); hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST); hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST); hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST); hypre_TFree(jj_count, HYPRE_MEMORY_HOST); hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST); if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext); return(0); }
WrapOpenMP.h
/** * @file WrapOpenMP.h * @author F. Gratl * @date 4/20/18 * * @details * Provide non-OpenMP versions of the most common OpenMP function calls, * so that they don't have to be wrapped in ifdef-s every time. * * Proper wrapper and renaming necessary, because of -fopenmp-simd handling of * gcc. * * Extend when necessary. */ #pragma once #if defined(AUTOPAS_OPENMP) #include <omp.h> #else #include "ExceptionHandler.h" #endif namespace autopas { #if defined(AUTOPAS_OPENMP) /** * Wrapper for omp_get_thread_num(). * @return Id of the current thread. */ inline int autopas_get_thread_num() { return omp_get_thread_num(); } /** * Wrapper for omp_get_num_thread(). * @return Number of currently active threads. */ inline int autopas_get_num_threads() { return omp_get_num_threads(); } /** * Wrapper for omp_get_max_threads(). * @return Number of threads that can be activated. */ inline int autopas_get_max_threads() { return omp_get_max_threads(); } /** * AutoPasLock for the openmp case, this wraps a omp_lock_t object. To make it copyable, etc. */ class AutoPasLock { public: /** * Default constructor */ AutoPasLock() { omp_init_lock(&_lock); } /** * Move Constructor */ AutoPasLock(AutoPasLock &&) noexcept { omp_init_lock(&_lock); } /** * Copy constructor */ AutoPasLock(const AutoPasLock &) { omp_init_lock(&_lock); } /** * Assignment operator * @return reference to this object after copy */ AutoPasLock &operator=(AutoPasLock) = delete; /** * Destructor */ ~AutoPasLock() { omp_destroy_lock(&_lock); } /** * Acquire the lock. */ void lock() { omp_set_lock(&_lock); } /** * Release the lock. 
*/ void unlock() { omp_unset_lock(&_lock); } private: omp_lock_t _lock; }; /** * Custom reductions: */ // reduction for merging vectors: {1,2} + {2,3} -> {1,2,2,3} #pragma omp declare reduction(vecMerge : std::vector<size_t> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #pragma omp declare reduction(vecMerge : std::vector<double> : omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end())) #else /** * Dummy for omp_set_lock() when no OpenMP is available. * @return Always 0. */ inline int autopas_get_thread_num() { return 0; } /** * Dummy for omp_get_num_threads() when no OpenMP is available. * @return Always 1. */ inline int autopas_get_num_threads() { return 1; } /** * Dummy for omp_get_max_threads() when no OpenMP is available. * @return Always 1. */ inline int autopas_get_max_threads() { return 1; } /** * AutoPasLock for the sequential case. */ class AutoPasLock { public: /** * Default constructor */ AutoPasLock() { _locked = false; } /** * Move Constructor */ AutoPasLock(AutoPasLock &&) noexcept { _locked = false; } /** * Copy constructor */ AutoPasLock(AutoPasLock &) { _locked = false; } /** * Assignment operator * @return reference to this object after copy */ AutoPasLock &operator=(AutoPasLock) = delete; /** * Destructor */ ~AutoPasLock() { if (_locked) { utils::ExceptionHandler::exception("AutoPasLocked destroyed in locked state."); } } /** * Acquire the lock. */ void lock() { if (_locked) { utils::ExceptionHandler::exception("Tried to acquire a locked lock."); } _locked = true; } /** * Release the lock. */ void unlock() { if (not _locked) { utils::ExceptionHandler::exception("Tried to release an unlocked lock."); } _locked = false; } private: // true if locked, false if unlocked bool _locked; }; #endif // These properties are needed because we use AutoPasLock in vectors on which we call resize(). 
static_assert(std::is_default_constructible_v<AutoPasLock>, "AutoPasLock needs to be default constructible!"); static_assert(std::is_move_constructible_v<AutoPasLock>, "AutoPasLock needs to be move constructible!"); } // namespace autopas
double_ptr.c
#include <stdlib.h>

/*
 * Fill arr[i][j] = i + j for a dim1 x dim2 matrix stored as an array of row
 * pointers. The outer loop is parallelized; the outer loop variable is made
 * private automatically by "omp parallel for", but the inner index must be
 * declared inside the parallel region -- the original declared j at function
 * scope, making it shared and racy across threads.
 */
void kernel(int dim1, int dim2, int** arr)
{
    int i;
#pragma omp parallel for
    for (i = 0; i < dim1; i++) {
        /* j is block-scoped -> private to each thread, no data race */
        for (int j = 0; j < dim2; j++) {
            arr[i][j] = i + j;
        }
    }
}

int main(int argc, char** argv)
{
    (void)argc;
    (void)argv;

    int dim1 = 10, dim2 = 10, i;

    /* One contiguous backing buffer plus a row-pointer table. */
    int** arr = malloc(sizeof *arr * dim1);
    int* arr_hidden = malloc(sizeof *arr_hidden * dim1 * dim2);
    if (arr == NULL || arr_hidden == NULL) {
        /* free(NULL) is a no-op, so unconditional frees are safe here */
        free(arr);
        free(arr_hidden);
        return EXIT_FAILURE;
    }

    for (i = 0; i < dim1; i++)
        arr[i] = &arr_hidden[dim2 * i];

    kernel(dim1, dim2, arr);

    free(arr_hidden);
    free(arr);
    return 0;
}
GB_unop__abs_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): change the code generator instead; edits here will be lost.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__abs_fp64_fp64
// op(A') function:  GB_unop_tran__abs_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = fabs (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabs (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = fabs (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__abs_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // element-wise and embarrassingly parallel: each iteration touches only
    // index p, so aliasing Cx == Ax is safe
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = fabs (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__abs_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose loop body is supplied by the shared template below,
    // specialized via the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GxB_Type_name.c
//------------------------------------------------------------------------------ // GxB_Type_name: return the name of a type //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB.h" GrB_Info GxB_Type_name // return the name of a GraphBLAS type ( char *type_name, // name of the type (char array of size at least // GxB_MAX_NAME_LEN, owned by the user application). GrB_Type type ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GB_WHERE1 ("GxB_Type_name (type_name, type)") ; GB_RETURN_IF_NULL (type_name) ; GB_RETURN_IF_NULL_OR_FAULTY (type) ; //-------------------------------------------------------------------------- // return the type_name //-------------------------------------------------------------------------- memcpy (type_name, type->name, GxB_MAX_NAME_LEN) ; #pragma omp flush return (GrB_SUCCESS) ; }
solucao_omp_do_for.c
/******************************************************************************
 * FILE: omp_workshare1.c
 * DESCRIPTION:
 *   OpenMP Example - Loop Work-sharing - C/C++ Version
 *   In this example, the iterations of a loop are scheduled dynamically
 *   across the team of threads.  A thread will perform CHUNK iterations
 *   at a time before being scheduled for the next CHUNK of work.
 * AUTHOR: Blaise Barney  5/99
 * LAST REVISED: 04/06/05
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define CHUNKSIZE   10
#define N       100

int main (int argc, char *argv[])
{
    int nthreads, tid, i, chunk;
    float a[N], b[N], c[N];

    /* Some initializations */
    for (i = 0; i < N; i++)
        a[i] = b[i] = i * 1.0;
    chunk = CHUNKSIZE;

    #pragma omp parallel shared(a,b,c,nthreads,chunk) private(i,tid)
    {
        tid = omp_get_thread_num();
        /* only the master thread reports the team size */
        if (tid == 0)
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n",tid);

        /* iterations handed out CHUNK at a time as threads become free */
        #pragma omp for schedule(dynamic,chunk)
        for (i = 0; i < N; i++)
        {
            c[i] = a[i] + b[i];
            printf("Thread %d: c[%d]= %f\n",tid,i,c[i]);
        }

    }  /* end of parallel section */

    /* fix: non-void main fell off the end without a return (UB before C99) */
    return 0;
}
LookupTable.h
#ifndef _LOOKUPTABLE_H_
#define _LOOKUPTABLE_H_

/*
 * LookupTable.h:
 * Lookup operation, for embeddings
 *
 * Created on: Apr 22, 2017
 * Author: mszhang
 */

#include "SparseParam.h"
#include "MyLib.h"
#include "Alphabet.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"

// Word-embedding lookup table backed by a sparse parameter matrix E
// (nDim rows x nVSize columns, one column per vocabulary entry).
class LookupTable {
  public:
    PAlphabet elems;     // vocabulary alphabet (string <-> id mapping)
    SparseParam E;       // embedding parameters
    bool bFineTune;      // whether embeddings receive gradient updates
    int nDim;            // embedding dimension
    int nVSize;          // vocabulary size
    int nUNKId;          // id of the unknown-word token, -1 if absent

  public:
    LookupTable() {
        nVSize = 0;
        nDim = 0;
        elems = NULL;
        nUNKId = -1;
        bFineTune = false;
    }

    //random initialization
    inline void initial(PAlphabet alpha, int dim, bool fineTune = true) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        initialWeights(dim, fineTune);
    }

    //initialization by pre-trained embeddings
    inline bool initial(PAlphabet alpha, const string& inFile, bool fineTune = true, dtype norm = -1) {
        elems = alpha;
        nVSize = elems->size();
        nUNKId = elems->from_string(unknownkey);
        return initialWeights(inFile, fineTune, norm);
    }

    // Randomly initialize a (dim x nVSize) embedding matrix; scale 1/sqrt(dim).
    inline void initialWeights(int dim, bool tune) {
        if (nVSize == 0 || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return;
        }
        nDim = dim;
        E.initial(nDim, nVSize);
        E.val.random(sqrt(1.0 / nDim));
        //E.val.norm2one();
        bFineTune = tune;
    }

    // default should be fineTune, just for initialization
    // Loads pre-trained embeddings from a whitespace-separated text file
    // ("word v1 v2 ... vD" per line); infers nDim from the first line.
    // Words missing from the file get the UNK vector (or the mean vector).
    // Returns false if the alphabet, file, or overlap with the vocabulary
    // is unusable.
    inline bool initialWeights(const string& inFile, bool tune, dtype norm = -1) {
        if (nVSize == 0 || !elems->is_fixed() || (nVSize == 1 && nUNKId >= 0)) {
            std::cout << "please check the alphabet" << std::endl;
            return false;
        }
        ifstream inf;
        // NOTE(review): a freshly constructed ifstream is never open, so this
        // close/clear branch is dead code.
        if (inf.is_open()) {
            inf.close();
            inf.clear();
        }
        inf.open(inFile.c_str());
        if (!inf.is_open()) {
            std::cout << "please check the input file" << std::endl;
            return false;
        }
        string strLine, curWord;
        int wordId;
        // read all non-empty lines into memory first
        vector<string> sLines;
        sLines.clear();
        while (1) {
            if (!my_getline(inf, strLine)) {
                break;
            }
            if (!strLine.empty()) {
                sLines.push_back(strLine);
            }
        }
        inf.close();
        if (sLines.size() == 0) {
            return false;
        }
        //find the first line, decide the wordDim;
        vector<string> vecInfo;
        split_bychar(sLines[0], vecInfo, ' ');
        nDim = vecInfo.size() - 1;
        E.initial(nDim, nVSize);
        E.val = 0;
        std::cout << "word embedding dim is " << nDim << std::endl;
        bool bHasUnknown = false;
        unordered_set<int> indexers;   // word ids actually found in the file
        NRVec<dtype> sum(nDim);        // running per-dimension sum, for the mean vector
        sum = 0.0;
        int count = 0;                 // number of vocabulary words found in the file
        for (int idx = 0; idx < sLines.size(); idx++) {
            split_bychar(sLines[idx], vecInfo, ' ');
            // NOTE(review): malformed lines are reported but still processed below
            if (vecInfo.size() != nDim + 1) {
                std::cout << "error embedding file" << std::endl;
            }
            curWord = vecInfo[0];
            //we assume the keys are normalized
            wordId = elems->from_string(curWord);
            if (wordId >= 0) {
                count++;
                if (nUNKId == wordId) {
                    bHasUnknown = true;
                }
                indexers.insert(wordId);
                for (int idy = 0; idy < nDim; idy++) {
                    dtype curValue = atof(vecInfo[idy + 1].c_str());
                    sum[idy] += curValue;
                    E.val[wordId][idy] += curValue;
                }
            }
        }
        if (count == 0) {
            // no overlap: fall back to random initialization
            E.val.random(sqrt(3.0 / nDim));
            std::cout << "find no overlapped lexicons in the embedding file" << std::endl;
            return false;
        }
        if (nUNKId >= 0 && !bHasUnknown) {
            // synthesize the UNK vector as (slightly damped) mean of seen vectors
            for (int idx = 0; idx < nDim; idx++) {
                E.val[nUNKId][idx] = sum[idx] / (count + 1);
            }
            indexers.insert(nUNKId);
            count++;
            std::cout << unknownkey << " not found, using averaged value to initialize." << std::endl;
        }
        // out-of-vocabulary rows: copy UNK if available, else the mean vector
        int oovWords = 0;
        for (int id = 0; id < nVSize; id++) {
            if (indexers.find(id) == indexers.end()) {
                oovWords++;
                for (int idy = 0; idy < nDim; idy++) {
                    E.val[id][idy] = nUNKId >= 0 ? E.val[nUNKId][idy] : sum[idy] / (count + 1);
                }
            }
        }
        std::cout << "OOV num is " << oovWords << ", total num is " << nVSize
                  << ", embedding oov ratio is " << oovWords * 1.0 / nVSize << std::endl;
        std::cout << "unknown id" << nUNKId << std::endl;
        bFineTune = tune;
        if (norm > 0) {
            E.val.norm2one(norm);
        }
        return true;
    }

    // Register E with the optimizer only when fine-tuning is enabled.
    inline void exportAdaParams(ModelUpdate& ada) {
        if (bFineTune) {
            ada.addParam(&E);
        }
    }

    inline int getElemId(const string& strFeat) {
        return elems->from_string(strFeat);
    }

    // Serialize parameters and scalar settings (alphabet is not saved).
    inline void save(std::ofstream &os) const {
        E.save(os);
        os << bFineTune << std::endl;
        os << nDim << std::endl;
        os << nVSize << std::endl;
        os << nUNKId << std::endl;
    }

    //set alpha directly
    inline void load(std::ifstream &is, PAlphabet alpha) {
        E.load(is);
        is >> bFineTune;
        is >> nDim;
        is >> nVSize;
        is >> nUNKId;
        elems = alpha;
    }
};

// Graph node performing a single embedding lookup; a leaf node of the graph.
class LookupNode : public Node {
  public:
    LookupTable* param;  // shared lookup table (not owned)
    int xid;             // resolved word id, -1 if unknown/unset

  public:
    LookupNode() {
        xid = -1;
        param = NULL;
        node_type = "lookup";
    }

    inline void setParam(LookupTable* paramInit) {
        param = paramInit;
    }

    inline void clearValue() {
        Node::clearValue();
        xid = -1;
    }

  public:
    //notice the output
    //this should be leaf nodes
    // Resolve the (already normalized) word string to an id, falling back to
    // the UNK id when available, and register this node with the graph.
    void forward(Graph *cg, const string& strNorm) {
        assert(param != NULL);
        xid = param->getElemId(strNorm);
        if (xid < 0 && param->nUNKId >= 0) {
            xid = param->nUNKId;
        }
        if (param->bFineTune && xid < 0) {
            std::cout << "Caution: unknown words are not modeled !" << std::endl;
        }
        degree = 0;
        cg->addNode(this);
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        LookupNode* conv_other = (LookupNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }

    // for which do no require merge
  public:
    // Copy the embedding column for xid into val; zero val for unknown words.
    void compute() {
        if (xid >= 0) {
            param->E.value(xid, val);
        } else {
            val.zero();
        }
    }

    // Accumulate the loss gradient into E's sparse gradient for xid.
    // Gradients flow for the UNK row even when fine-tuning is off.
    void backward() {
        assert(param != NULL);
        if (xid == param->nUNKId || (xid >= 0 && param->bFineTune)) {
            param->E.loss(xid, loss);
        }
    }
};

// Batched executor: runs compute/backward plus dropout for each LookupNode.
class LookupExecute :public Execute {
  public:
    bool bTrain;   // training mode flag, forwarded to dropout

  public:
    inline void forward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->compute();
            batch[idx]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        int count = batch.size();
        //#pragma omp parallel for
        for (int idx = 0; idx < count; idx++) {
            batch[idx]->backward_drop();
            batch[idx]->backward();
        }
    }
};

// Each LookupNode gets its own single-element executor (no batching/merge).
inline PExecute LookupNode::generate(bool bTrain, dtype cur_drop_factor) {
    LookupExecute* exec = new LookupExecute();
    exec->batch.push_back(this);
    exec->bTrain = bTrain;
    exec->drop_factor = cur_drop_factor;
    return exec;
}

#endif /*_LOOKUPTABLE_H_*/
spectra.c
/** @file cl.c Documented spectra module
 *
 * Julien Lesgourgues, 25.08.2010
 *
 * This module computes the anisotropy and Fourier power spectra
 * \f$ C_l^{X}, P(k), ... \f$'s given the transfer and Bessel functions
 * (for anisotropy spectra), the source functions (for Fourier spectra)
 * and the primordial spectra.
 *
 * The following functions can be called from other modules:
 *
 * -# spectra_init() at the beginning (but after transfer_init())
 * -# spectra_cl_at_l() at any time for computing C at any l
 * -# spectra_spectrum_at_z() at any time for computing P(k) at any z
 * -# spectra_spectrum_at_k_and z() at any time for computing P at any k and z
 * -# spectra_free() at the end
 */

#include "spectra.h"

/* Sum (2l+1)*C_l^TT over l in [l1,l2], split into the three initial-condition
 * combinations of the scalar mode: (0,0), (0,1) doubled, and (1,1).
 * NOTE(review): the ic indices 0 and 1 are hard-coded, so this presumably
 * assumes exactly two correlated initial conditions — confirm with callers. */
int spectra_bandpower(struct spectra * psp,
                      int l1,
                      int l2,
                      double * TT_II,
                      double * TT_RI,
                      double * TT_RR
                      ) {

  int l;
  int index_md;
  double * cl_tot;
  double ** cl_md;
  double ** cl_md_ic;

  /* scratch buffers for spectra_cl_at_l(), one set per mode */
  class_alloc(cl_tot,psp->ct_size*sizeof(double),psp->error_message);
  class_alloc(cl_md,psp->md_size*sizeof(double*),psp->error_message);
  class_alloc(cl_md_ic,psp->md_size*sizeof(double*),psp->error_message);

  for (index_md=0;index_md<psp->md_size; index_md++) {
    class_alloc(cl_md[index_md],psp->ct_size*sizeof(double),psp->error_message);
    class_alloc(cl_md_ic[index_md],psp->ct_size*psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);
  }

  *TT_RR=0.;
  *TT_RI=0.;
  *TT_II=0.;

  for (l=l1; l<=l2; l++) {

    class_call(spectra_cl_at_l(psp,
                               (double)l,
                               cl_tot,
                               cl_md,
                               cl_md_ic),
               psp->error_message,
               psp->error_message);

    /* accumulate (2l+1)-weighted TT band powers; the cross term (0,1)
       carries a factor 2 for the symmetric off-diagonal contribution */
    *TT_RR += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,0,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];
    *TT_RI += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(0,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt]*2.;
    *TT_II += (double)(2*l+1)*cl_md_ic[psp->index_md_scalars][index_symmetric_matrix(1,1,psp->ic_size[psp->index_md_scalars])*psp->ct_size+psp->index_ct_tt];

  }

  for (index_md=0;index_md<psp->md_size; index_md++) {
    free(cl_md[index_md]);
    free(cl_md_ic[index_md]);
  }
  free(cl_tot);
  free(cl_md);
  free(cl_md_ic);

  return _SUCCESS_;

}

/**
 * Anisotropy power spectra C_l's for all types, modes and initial conditions.
 *
 * This routine evaluates all the C_l's at a given value of l by
 * interpolating in the pre-computed table. When relevant, it also
 * sums over all initial conditions for each mode, and over all modes.
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
 *
 * @param psp Input: pointer to spectra structure (containing pre-computed table)
 * @param l Input: multipole number
 * @param cl_tot Output: total C_l's for all types (TT, TE, EE, etc..)
 * @param cl_md Output: C_l's for all types (TT, TE, EE, etc..) decomposed mode by mode (scalar, tensor, ...) when relevant
 * @param cl_md_ic Output: C_l's for all types (TT, TE, EE, etc..) decomposed by pairs of initial conditions (adiabatic, isocurvatures) for each mode (usually, only for the scalar mode) when relevant
 * @return the error status
 */

int spectra_cl_at_l(
                    struct spectra * psp,
                    double l,
                    double * cl_tot,    /* array with argument cl_tot[index_ct] (must be already allocated) */
                    double * * cl_md,   /* array with argument cl_md[index_md][index_ct] (must be already allocated only if several modes) */
                    double * * cl_md_ic /* array with argument cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct] (must be already allocated for a given mode only if several ic's) */
                    ) {

  /** Summary: */

  /** - define local variables */

  int last_index;
  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_ct;

  /** A) treat case in which there is only one mode and one
      initial condition. Then, only cl_tot needs to be filled.
*/ if ((psp->md_size == 1) && (psp->ic_size[0] == 1)) { index_md = 0; if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate at l */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_tot, psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero for the types such that l<l_max */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_tot[index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; } } /** B) treat case in which there is only one mode with several initial condition. Fill cl_md_ic[index_md=0] and sum it to get cl_tot. */ if ((psp->md_size == 1) && (psp->ic_size[0] > 1)) { index_md = 0; for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (((int)l <= psp->l[psp->l_size[index_md]-1]) && (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_)) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } /* compute cl_tot by summing over cl_md_ic */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (index_ic1 == index_ic2) 
cl_tot[index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_tot[index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** C) loop over modes */ if (psp->md_size > 1) { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]=0.; for (index_md = 0; index_md < psp->md_size; index_md++) { /** C.1) treat case in which the mode under consideration has only one initial condition. Fill cl_md[index_md]. */ if (psp->ic_size[index_md] == 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ct_size, l, &last_index, cl_md[index_md], psp->ct_size, psp->error_message), psp->error_message, psp->error_message); for (index_ct=0; index_ct<psp->ct_size; index_ct++) if ((int)l > psp->l_max_ct[index_md][index_ct]) cl_md[index_md][index_ct]=0.; } else { for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_md[index_md][index_ct]=0.; } } /** C.2) treat case in which the mode under consideration has several initial conditions. 
Fill cl_md_ic[index_md] and sum it to get cl_md[index_md] */ if (psp->ic_size[index_md] > 1) { if ((int)l <= psp->l[psp->l_size[index_md]-1]) { /* interpolate all ic and ct */ class_call(array_interpolate_spline(psp->l, psp->l_size[index_md], psp->cl[index_md], psp->ddcl[index_md], psp->ic_ic_size[index_md]*psp->ct_size, l, &last_index, cl_md_ic[index_md], psp->ic_ic_size[index_md]*psp->ct_size, psp->error_message), psp->error_message, psp->error_message); /* set to zero some of the components */ for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { if (((int)l > psp->l_max_ct[index_md][index_ct]) || (psp->is_non_zero[index_md][index_ic1_ic2] == _FALSE_)) cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* if l was too big, set anyway all components to zero */ else { for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]=0.; } } } } /* sum up all ic for each mode */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) { cl_md[index_md][index_ct]=0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) cl_md[index_md][index_ct]+=cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; else cl_md[index_md][index_ct]+=2.*cl_md_ic[index_md][index_ic1_ic2*psp->ct_size+index_ct]; } } } } /** C.3) add contribution of cl_md[index_md] to 
cl_tot */ for (index_ct=0; index_ct<psp->ct_size; index_ct++) cl_tot[index_ct]+=cl_md[index_md][index_ct]; } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary redshift and for all initial conditions. * * This routine evaluates the matter power spectrum at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0)) * * * Can be called in two modes: linear or logarithmic. * * - linear: returns P(k) (units: Mpc^3) * * - logarithmic: returns ln(P(k)) * * One little subtlety: in case of several correlated initial conditions, * the cross-correlation spectrum can be negative. Then, in logarithmic mode, * the non-diagonal elements contain the cross-correlation angle P_12/sqrt(P_11 P_22) * (from -1 to 1) instead of ln(P_12) * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param mode Input: linear or logarithmic * @param z Input: redshift * @param output_tot Ouput: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithms (logarithmic mode) * @param output_ic Ouput: for each pair of initial conditions, matter power spectra P(k) in Mpc**3 (linear mode), or their logarithms and cross-correlation angles (logarithmic mode) * @return the error status */ int spectra_pk_at_z( struct background * pba, struct spectra * psp, enum linear_or_logarithmic mode, double z, double * output_tot, /* array with argument output_tot[index_k] (must be already allocated) */ double * output_ic /* array with argument output_tot[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] (must be already allocated only if more than one initial condition) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; int index_k; double tau,ln_tau; int index_ic1,index_ic2,index_ic1_ic2; index_md = psp->index_md_scalars; /** - first step: convert z into ln(tau) */ class_call(background_tau_of_z(pba,z,&tau), pba->error_message, psp->error_message); class_test(tau <= 0., psp->error_message, "negative or null value of conformal time: cannot interpolate"); ln_tau = log(tau); /** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */ /** (a.) 
if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */ if (psp->ln_tau_size == 1) { class_test(z != 0., psp->error_message, "asked z=%e but only P(k,z=0) has been tabulated",z); for (index_k=0; index_k<psp->ln_k_size; index_k++) if (psp->ic_size[index_md] == 1) { output_tot[index_k] = psp->ln_pk[index_k]; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = psp->ln_pk[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]; } } } /** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */ else { if (psp->ic_ic_size[index_md] == 1) { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ln_k_size, ln_tau, &last_index, output_tot, psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } else { class_call(array_interpolate_spline(psp->ln_tau, psp->ln_tau_size, psp->ln_pk, psp->ddln_pk, psp->ic_ic_size[index_md]*psp->ln_k_size, ln_tau, &last_index, output_ic, psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message), psp->error_message, psp->error_message); } } /** - third step: if there are several initial conditions, compute the total P(k) and set back all uncorrelated coefficients to exactly zero. Check positivity of total P(k). */ if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = 0.; for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (index_ic1 == index_ic2) { output_tot[index_k] += exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } else { if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { output_tot[index_k] += 2. 
* output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] * sqrt(exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]) * exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])])); } else output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.; } } } class_test(output_tot[index_k] <= 0., psp->error_message, "for k=%e, z=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total=%e results negative", exp(psp->ln_k[index_k]),z,output_tot[index_k]); } } /** - fourth step: depending on requested mode (linear or logarithmic), apply necessary transformation to the output arrays */ /** (a.) linear mode: if only one initial condition, convert output_pk to linear format; if several initial conditions, convert output_ic to linear format, output_tot is already in this format */ if (mode == linear) { if (psp->ic_size[index_md] == 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { output_tot[index_k] = exp(output_tot[index_k]); } } else { for (index_k=0; index_k<psp->ln_k_size; index_k++) { for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2] = exp(output_ic[index_k * psp->ic_ic_size[index_md] + index_ic1_ic2]); } for (index_ic1=0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] = output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md])] *sqrt(output_ic[index_k * psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])] * output_ic[index_k * 
psp->ic_ic_size[index_md] + index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]); } } } } } /** (b.) logarithmic mode: if only one initial condition, nothing to be done; if several initial conditions, convert output_tot to logarithmic format, output_ic is already in this format */ else { if (psp->ic_size[index_md] > 1) { for (index_k=0; index_k<psp->ln_k_size; index_k++) { /* we have already checked above that output_tot was positive */ output_tot[index_k] = log(output_tot[index_k]); } } } return _SUCCESS_; } /** * Matter power spectrum for arbitrary wavenumber, redshift and initial condition. * * This routine evaluates the matter power spectrum at a given value of k and z by * interpolating in a table of all P(k)'s computed at this z by spectra_pk_at_z() (when kmin <= k <= kmax), * or eventually by using directly the primordial spectrum (when 0 <= k < kmin): * the latter case is an approximation, valid when kmin << comoving Hubble scale today. * Returns zero when k=0. Returns an error when k<0 or k > kmax. * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. 
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Ouput: total matter power spectrum P(k) in Mpc**3 * @param pk_ic Ouput: for each pair of initial conditions, matter power spectra P(k) in Mpc**3 * @return the error status */ int spectra_pk_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot, /* pointer to a single number (must be already allocated) */ double * pk_ic /* array of argument pk_ic[index_ic1_ic2] (must be already allocated only if several initial conditions) */ ) { /** Summary: */ /** - define local variables */ int index_md; int index_k; int last_index; int index_ic1,index_ic2,index_ic1_ic2; double * spectrum_at_z = NULL; double * spectrum_at_z_ic = NULL; double * spline; double * pk_primordial_k = NULL; double kmin; double * pk_primordial_kmin = NULL; index_md = psp->index_md_scalars; /** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_pk_at_z()) */ class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1])); /** - deal with case 0 <= k < kmin */ if (k < exp(psp->ln_k[0])) { /** (a.) subcase k=0: then P(k)=0 */ if (k == 0.) { if (psp->ic_size[index_md] == 1) { *pk_tot=0.; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { pk_ic[index_ic1_ic2] = 0.; } } } /** (b.) 
subcase 0<k<kmin: in this case we know that on super-Hubble scales: * P(k) = [some number] * k * P_primordial(k) * so * P(k) = P(kmin) * (k P_primordial(k)) / (kmin P_primordial(kmin)) * (note that the result is accurate only if kmin is such that [a0 kmin] << H0) */ else { /* compute P(k,z) which contains P(kmin,z)*/ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); if (psp->ic_size[index_md] > 1) { class_alloc(spectrum_at_z_ic, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); } class_call(spectra_pk_at_z(pba, psp, linear, z, spectrum_at_z, spectrum_at_z_ic), psp->error_message, psp->error_message); /* compute P_primordial(k) */ class_alloc(pk_primordial_k, sizeof(double)*psp->ic_ic_size[index_md], psp->error_message); class_call(primordial_spectrum_at_k(ppm, index_md, linear, k, pk_primordial_k), ppm->error_message,psp->error_message); /* compute P_primordial(kmin) */ kmin = exp(psp->ln_k[0]); class_alloc(pk_primordial_kmin, sizeof(double)*psp->ic_ic_size[index_md], psp->error_message); class_call(primordial_spectrum_at_k(ppm, index_md, linear, kmin, pk_primordial_kmin), ppm->error_message, psp->error_message); /* apply above analytic approximation for P(k) */ index_k=0; if (psp->ic_size[index_md] == 1) { index_ic1_ic2 = 0; *pk_tot = spectrum_at_z[index_k] *k*pk_primordial_k[index_ic1_ic2] /kmin/pk_primordial_kmin[index_ic1_ic2]; } else { for (index_ic1_ic2 = 0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++) { pk_ic[index_ic1_ic2] = spectrum_at_z_ic[index_ic1_ic2] *k*pk_primordial_k[index_ic1_ic2] /kmin/pk_primordial_kmin[index_ic1_ic2]; } } free(spectrum_at_z); if (psp->ic_size[index_md] > 1) free(spectrum_at_z_ic); free(pk_primordial_k); free(pk_primordial_kmin); } } /** - deal with case kmin <= k <= kmax */ else { /* compute P(k,z) (in logarithmic format for more accurate interpolation) */ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); if 
(psp->ic_size[index_md] > 1) { class_alloc(spectrum_at_z_ic, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); } class_call(spectra_pk_at_z(pba, psp, logarithmic, z, spectrum_at_z, spectrum_at_z_ic), psp->error_message, psp->error_message); /* get its second derivatives with spline, then interpolate, then convert to linear format */ class_alloc(spline, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); if (psp->ic_size[index_md] == 1) { class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message), psp->error_message, psp->error_message); *pk_tot = exp(*pk_tot); } else { class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, psp->ic_ic_size[index_md], spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z_ic, spline, psp->ic_ic_size[index_md], log(k), &last_index, pk_ic, psp->ic_ic_size[index_md], psp->error_message), psp->error_message, psp->error_message); for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]); pk_ic[index_ic1_ic2] = exp(pk_ic[index_ic1_ic2]); } for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { pk_ic[index_ic1_ic2] = pk_ic[index_ic1_ic2]* sqrt(pk_ic[index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md])]* 
pk_ic[index_symmetric_matrix(index_ic2,index_ic2,psp->ic_size[index_md])]); } else { pk_ic[index_ic1_ic2] = 0.; } } } free(spectrum_at_z_ic); } free(spectrum_at_z); free(spline); } /** - last step: if more than one condition, sum over pk_ic to get pk_tot, and set back coefficients of non-correlated pairs to exactly zero. */ if (psp->ic_size[index_md] > 1) { *pk_tot = 0.; for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) { for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) { index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]); if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) { if (index_ic1 == index_ic2) *pk_tot += pk_ic[index_ic1_ic2]; else *pk_tot += 2.*pk_ic[index_ic1_ic2]; } else { pk_ic[index_ic1_ic2] = 0.; } } } class_test(*pk_tot <= 0., psp->error_message, "for k=%e, the matrix of initial condition amplitudes was not positive definite, hence P(k)_total results negative",k); } return _SUCCESS_; } /** * Non-linear total matter power spectrum for arbitrary redshift. * * This routine evaluates the non-linear matter power spectrum at a given value of z by * interpolating in the pre-computed table (if several values of z have been stored) * or by directly reading it (if it only contains values at z=0 and we want P(k,z=0)) * * * Can be called in two modes: linear or logarithmic. * * - linear: returns P(k) (units: Mpc^3) * * - logarithmic: returns ln(P(k)) * * This function can be * called from whatever module at whatever time, provided that * spectra_init() has been called before, and spectra_free() has not * been called yet. 
 *
 * @param pba Input: pointer to background structure (used for converting z into tau)
 * @param psp Input: pointer to spectra structure (containing pre-computed table)
 * @param mode Input: linear or logarithmic
 * @param z Input: redshift
 * @param output_tot Output: total matter power spectrum P(k) in Mpc**3 (linear mode), or its logarithms (logarithmic mode)
 * @return the error status
 */

int spectra_pk_nl_at_z(
                       struct background * pba,
                       struct spectra * psp,
                       enum linear_or_logarithmic mode,
                       double z,
                       double * output_tot /* array with argument output_tot[index_k] (must be already allocated) */
                       ) {

  /** Summary: */

  /** - define local variables */

  int last_index;
  int index_k;
  double tau,ln_tau;

  /** - first step: convert z into ln(tau) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  ln_tau = log(tau);

  /** - second step: for both modes (linear or logarithmic), store the spectrum in logarithmic format in the output array(s) */

  /** (a.) if only values at tau=tau_today are stored and we want P(k,z=0), no need to interpolate */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only P(k,z=0) has been tabulated",z);

    /* direct copy of the tabulated ln P_nl(k, z=0) */
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      output_tot[index_k] = psp->ln_pk_nl[index_k];
    }
  }

  /** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */

  else {

    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk_nl,
                                        psp->ddln_pk_nl,
                                        psp->ln_k_size,
                                        ln_tau,
                                        &last_index,
                                        output_tot,
                                        psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  /** - fourth step: eventually convert to linear format */

  if (mode == linear) {
    /* table stores ln P; exponentiate when linear output was requested */
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      output_tot[index_k] = exp(output_tot[index_k]);
    }
  }

  return _SUCCESS_;

}

/**
 * Non-linear total matter power spectrum for arbitrary wavenumber and redshift.
 *
 * This routine evaluates the matter power spectrum at a given value of k and z by
 * interpolating in a table of all P(k)'s computed at this z by spectra_pk_nl_at_z() (when kmin <= k <= kmax),
 * or eventually by using directly the primordial spectrum (when 0 <= k < kmin):
 * the latter case is an approximation, valid when kmin << comoving Hubble scale today.
 * Returns zero when k=0. Returns an error when k<0 or k > kmax.
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
* * @param pba Input: pointer to background structure (used for converting z into tau) * @param ppm Input: pointer to primordial structure (used only in the case 0 < k < kmin) * @param psp Input: pointer to spectra structure (containing pre-computed table) * @param k Input: wavenumber in 1/Mpc * @param z Input: redshift * @param pk_tot Ouput: total matter power spectrum P(k) in Mpc**3 * @return the error status */ int spectra_pk_nl_at_k_and_z( struct background * pba, struct primordial * ppm, struct spectra * psp, double k, double z, double * pk_tot /* pointer to a single number (must be already allocated) */ ) { /** Summary: */ /** - define local variables */ int index_md; int last_index; double * spectrum_at_z = NULL; double * spline; index_md = psp->index_md_scalars; /** - first step: check that k is in valid range [0:kmax] (the test for z will be done when calling spectra_pk_at_z()) */ class_test((k < exp(psp->ln_k[0])) || (k > exp(psp->ln_k[psp->ln_k_size-1])), psp->error_message, "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1])); /* compute P(k,z) (in logarithmic format for more accurate interpolation) */ class_alloc(spectrum_at_z, psp->ln_k_size*sizeof(double), psp->error_message); class_call(spectra_pk_nl_at_z(pba, psp, logarithmic, z, spectrum_at_z), psp->error_message, psp->error_message); /* get its second derivatives with spline, then interpolate, then convert to linear format */ class_alloc(spline, sizeof(double)*psp->ic_ic_size[index_md]*psp->ln_k_size, psp->error_message); class_call(array_spline_table_lines(psp->ln_k, psp->ln_k_size, spectrum_at_z, 1, spline, _SPLINE_NATURAL_, psp->error_message), psp->error_message, psp->error_message); class_call(array_interpolate_spline(psp->ln_k, psp->ln_k_size, spectrum_at_z, spline, 1, log(k), &last_index, pk_tot, 1, psp->error_message), psp->error_message, psp->error_message); *pk_tot = exp(*pk_tot); free(spectrum_at_z); free(spline); return _SUCCESS_; } /** * Matter transfer functions 
 * T_i(k) for arbitrary redshift and for all
 * initial conditions.
 *
 * This routine evaluates the matter transfer functions at a given value of z by
 * interpolating in the pre-computed table (if several values of z have been stored)
 * or by directly reading it (if it only contains values at z=0 and we want T_i(k,z=0))
 *
 * This function can be
 * called from whatever module at whatever time, provided that
 * spectra_init() has been called before, and spectra_free() has not
 * been called yet.
 *
 * @param pba    Input: pointer to background structure (used for converting z into tau)
 * @param psp    Input: pointer to spectra structure (containing pre-computed table)
 * @param z      Input: redshift
 * @param output Output: matter transfer functions
 * @return the error status
 */

int spectra_tk_at_z(
                    struct background * pba,
                    struct spectra * psp,
                    double z,
                    double * output /* array with argument output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr] (must be already allocated) */
                    ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;
  int index_k;
  int index_tr;
  double tau,ln_tau;
  int index_ic;

  /* only scalar modes carry matter transfer functions */
  index_md = psp->index_md_scalars;

  /** - first step: convert z into ln(tau) */

  class_call(background_tau_of_z(pba,z,&tau),
             pba->error_message,
             psp->error_message);

  class_test(tau <= 0.,
             psp->error_message,
             "negative or null value of conformal time: cannot interpolate");

  ln_tau = log(tau);

  /** - second step: store the matter transfer functions in the output array */

  /** (a.) if only values at tau=tau_today are stored and we want T_i(k,z=0), no need to interpolate */

  if (psp->ln_tau_size == 1) {

    class_test(z != 0.,
               psp->error_message,
               "asked z=%e but only T_i(k,z=0) has been tabulated",z);

    /* flat copy: output uses the same (k, ic, transfer-type) flattened
       indexing as the stored matter_transfer table */
    for (index_k=0; index_k<psp->ln_k_size; index_k++)
      for (index_tr=0; index_tr<psp->tr_size; index_tr++)
        for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++)
          output[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr]
            = psp->matter_transfer[(index_k*psp->ic_size[index_md]+index_ic)*psp->tr_size+index_tr];

  }

  /** (b.) if several values of tau have been stored, use interpolation routine to get spectra at correct redshift */

  else {

    /* one spline "line" per (ic, transfer-type, k) triple, interpolated in ln(tau) */
    class_call(array_interpolate_spline(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->matter_transfer,
                                        psp->ddmatter_transfer,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        ln_tau,
                                        &last_index,
                                        output,
                                        psp->ic_size[index_md]*psp->tr_size*psp->ln_k_size,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  return _SUCCESS_;

}

/**
 * Matter transfer functions T_i(k) for arbitrary wavenumber, redshift
 * and initial condition.
 *
 * This routine evaluates the matter transfer functions at a given
 * value of k and z by interpolating in a table of all T_i(k,z)'s
 * computed at this z by spectra_tk_at_z() (when kmin <= k <= kmax).
 * Returns an error when k<kmin or k > kmax.
 *
 * This function can be called from whatever module at whatever time,
 * provided that spectra_init() has been called before, and
 * spectra_free() has not been called yet.
 *
 * @param pba    Input: pointer to background structure (used for converting z into tau)
 * @param psp    Input: pointer to spectra structure (containing pre-computed table)
 * @param k      Input: wavenumber in 1/Mpc
 * @param z      Input: redshift
 * @param output Output: matter transfer functions
 * @return the error status
 */

int spectra_tk_at_k_and_z(
                          struct background * pba,
                          struct spectra * psp,
                          double k,
                          double z,
                          double * output  /* array with argument output[index_ic*psp->tr_size+index_tr] (must be already allocated) */
                          ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int last_index;
  double * tks_at_z;
  double * ddtks_at_z;

  index_md = psp->index_md_scalars;

  /** - first step: check that k is in valid range [0:kmax]
      (the test for z will be done when calling spectra_tk_at_z()) */

  class_test((k < 0.) || (k > exp(psp->ln_k[psp->ln_k_size-1])),
             psp->error_message,
             "k=%e out of bounds [%e:%e]",k,0.,exp(psp->ln_k[psp->ln_k_size-1]));

  /* compute T_i(k,z) for the whole k grid at this z */

  class_alloc(tks_at_z,
              psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double),
              psp->error_message);

  class_call(spectra_tk_at_z(pba,
                             psp,
                             z,
                             tks_at_z),
             psp->error_message,
             psp->error_message);

  /* get its second derivatives w.r.t. k with spline, then interpolate */

  class_alloc(ddtks_at_z,
              psp->ln_k_size*psp->tr_size*psp->ic_size[index_md]*sizeof(double),
              psp->error_message);

  class_call(array_spline_table_lines(psp->ln_k,
                                      psp->ln_k_size,
                                      tks_at_z,
                                      psp->tr_size*psp->ic_size[index_md],
                                      ddtks_at_z,
                                      _SPLINE_NATURAL_,
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  class_call(array_interpolate_spline(psp->ln_k,
                                      psp->ln_k_size,
                                      tks_at_z,
                                      ddtks_at_z,
                                      psp->tr_size*psp->ic_size[index_md],
                                      log(k),
                                      &last_index,
                                      output,
                                      psp->tr_size*psp->ic_size[index_md],
                                      psp->error_message),
             psp->error_message,
             psp->error_message);

  free(tks_at_z);
  free(ddtks_at_z);

  return _SUCCESS_;

}

/**
 * This routine initializes the spectra structure (in particular,
 * computes table of anisotropy and Fourier spectra \f$ C_l^{X}, P(k), ... \f$)
 *
 * @param ppr Input : pointer to precision structure
 * @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest)
 * @param ppt Input : pointer to perturbation structure
 * @param ppm Input : pointer to primordial structure
 * @param pnl Input : pointer to nonlinear structure (note: missing from the original doc comment)
 * @param ptr Input : pointer to transfer structure
 * @param psp Output: pointer to initialized spectra structure
 * @return the error status
 */

int spectra_init(
                 struct precision * ppr,
                 struct background * pba,
                 struct perturbs * ppt,
                 struct primordial * ppm,
                 struct nonlinear *pnl,
                 struct transfers * ptr,
                 struct spectra * psp
                 ) {

  /** Summary: */

  double TT_II,TT_RI,TT_RR;
  int l1,l2;

  /** - check that we really want to compute at least one spectrum */

  if ((ppt->has_cls == _FALSE_) &&
      (ppt->has_pk_matter == _FALSE_) &&
      (ppt->has_density_transfers == _FALSE_) &&
      (ppt->has_velocity_transfers == _FALSE_)) {
    psp->md_size = 0;
    if (psp->spectra_verbose > 0)
      printf("No spectra requested. Spectra module skipped.\n");
    return _SUCCESS_;
  }
  else {
    if (psp->spectra_verbose > 0)
      printf("Computing unlensed linear spectra\n");
  }

  /** - initialize indices and allocate some of the arrays in the spectra structure */

  class_call(spectra_indices(pba,ppt,ptr,ppm,psp),
             psp->error_message,
             psp->error_message);

  /** - deal with C_l's, if any */

  if (ppt->has_cls == _TRUE_) {

    class_call(spectra_cls(pba,ppt,ptr,ppm,psp),
               psp->error_message,
               psp->error_message);

  }
  else {
    psp->ct_size=0;
  }

  /** - deal with P(k,tau) and T_i(k,tau) */

  if ((ppt->has_pk_matter == _TRUE_) || (ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {

    class_call(spectra_k_and_tau(pba,ppt,psp),
               psp->error_message,
               psp->error_message);

    if (ppt->has_pk_matter == _TRUE_) {

      class_call(spectra_pk(pba,ppt,ppm,pnl,psp),
                 psp->error_message,
                 psp->error_message);

    }
    else {
      /* NULL marks "not allocated" so that spectra_free() can skip it */
      psp->ln_pk=NULL;
    }

    if ((ppt->has_density_transfers == _TRUE_) || (ppt->has_velocity_transfers == _TRUE_)) {

      class_call(spectra_matter_transfers(pba,ppt,psp),
                 psp->error_message,
                 psp->error_message);
    }
    else {
      psp->matter_transfer=NULL;
    }

  }
  else {
    psp->ln_k_size=0;
  }

  /* if there is one isocurvature mode, compute and store in the psp
     structure the isocurvature contribution to some bandpowers in
     different ranges of l, and the contribution to the primordial
     spectrum at different wavenumbers (used in the Planck analysis) */

  if ((ppt->has_scalars == _TRUE_) && (ppt->has_cls == _TRUE_) && (ppt->ic_size[ppt->index_md_scalars] == 2)) {

    /* bandpower ratios alpha_XX_l1_l2 = fraction of TT power coming
       from II / RI / RR in the multipole range [l1,l2] */

    l1=2;
    l2=20;

    class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
               psp->error_message,
               psp->error_message);

    class_test(TT_II+TT_RI+TT_RR==0.,
               psp->error_message,
               "should never happen");

    psp->alpha_II_2_20=TT_II/(TT_II+TT_RI+TT_RR);
    psp->alpha_RI_2_20=TT_RI/(TT_II+TT_RI+TT_RR);
    psp->alpha_RR_2_20=TT_RR/(TT_II+TT_RI+TT_RR);

    l1=21;
    l2=200;

    class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
               psp->error_message,
               psp->error_message);

    class_test(TT_II+TT_RI+TT_RR==0.,
               psp->error_message,
               "should never happen");

    psp->alpha_II_21_200=TT_II/(TT_II+TT_RI+TT_RR);
    psp->alpha_RI_21_200=TT_RI/(TT_II+TT_RI+TT_RR);
    psp->alpha_RR_21_200=TT_RR/(TT_II+TT_RI+TT_RR);

    l1=201;
    l2=2500;

    class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
               psp->error_message,
               psp->error_message);

    class_test(TT_II+TT_RI+TT_RR==0.,
               psp->error_message,
               "should never happen");

    psp->alpha_II_201_2500=TT_II/(TT_II+TT_RI+TT_RR);
    psp->alpha_RI_201_2500=TT_RI/(TT_II+TT_RI+TT_RR);
    psp->alpha_RR_201_2500=TT_RR/(TT_II+TT_RI+TT_RR);

    l1=2;
    l2=2500;

    class_call(spectra_bandpower(psp,l1,l2,&TT_II,&TT_RI,&TT_RR),
               psp->error_message,
               psp->error_message);

    class_test(TT_II+TT_RI+TT_RR==0.,
               psp->error_message,
               "should never happen");

    psp->alpha_II_2_2500=TT_II/(TT_II+TT_RI+TT_RR);
    psp->alpha_RI_2_2500=TT_RI/(TT_II+TT_RI+TT_RR);
    psp->alpha_RR_2_2500=TT_RR/(TT_II+TT_RI+TT_RR);

    /* fractional isocurvature contribution to the primordial spectrum
       at the pivot scale and at k = 0.002 and 0.1 /Mpc; only one of
       the three ic's (cdi/nid/niv) is expected to be active at a time,
       since each branch overwrites alpha_kp/k1/k2 */

    if (ppt->has_cdi==_TRUE_) {

      psp->alpha_kp=ppm->f_cdi*ppm->f_cdi
        /(1.+ppm->f_cdi*ppm->f_cdi);

      psp->alpha_k1=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot))
        /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.002/ppm->k_pivot)));

      psp->alpha_k2=ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot))
        /(1.+ppm->f_cdi*ppm->f_cdi*exp((ppm->n_cdi-ppm->n_s)*log(0.1/ppm->k_pivot)));
    }

    if (ppt->has_nid==_TRUE_) {

      psp->alpha_kp=ppm->f_nid*ppm->f_nid
        /(1.+ppm->f_nid*ppm->f_nid);

      psp->alpha_k1=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot))
        /(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.002/ppm->k_pivot)));

      psp->alpha_k2=ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot))
        /(1.+ppm->f_nid*ppm->f_nid*exp((ppm->n_nid-ppm->n_s)*log(0.1/ppm->k_pivot)));
    }

    if (ppt->has_niv==_TRUE_) {

      psp->alpha_kp=ppm->f_niv*ppm->f_niv
        /(1.+ppm->f_niv*ppm->f_niv);

      psp->alpha_k1=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot))
        /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.002/ppm->k_pivot)));

      psp->alpha_k2=ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot))
        /(1.+ppm->f_niv*ppm->f_niv*exp((ppm->n_niv-ppm->n_s)*log(0.1/ppm->k_pivot)));
    }

  }

  return _SUCCESS_;
}

/**
 * This routine frees all the memory space allocated by spectra_init().
 *
 * To be called at the end of each run, only when no further calls to
 * spectra_cls_at_l(), spectra_pk_at_z(), spectra_pk_at_k_and_z() are needed.
 *
 * @param psp Input: pointer to spectra structure (which fields must be freed)
 * @return the error status
 */

int spectra_free(
                 struct spectra * psp
                 ) {

  int index_md;

  if (psp->md_size > 0) {

    if (psp->ct_size > 0) {

      for (index_md = 0; index_md < psp->md_size; index_md++) {
        free(psp->l_max_ct[index_md]);
        free(psp->cl[index_md]);
        free(psp->ddcl[index_md]);
      }
      free(psp->l);
      free(psp->l_size);
      free(psp->l_max_ct);
      free(psp->l_max);
      free(psp->cl);
      free(psp->ddcl);
    }

    if (psp->ln_k_size > 0) {

      free(psp->ln_tau);
      free(psp->ln_k);

      if (psp->ln_pk != NULL) {

        free(psp->ln_pk);

        /* second-derivative tables exist only when interpolation in tau was needed */
        if (psp->ln_tau_size > 1) {
          free(psp->ddln_pk);
        }

        if (psp->ln_pk_nl != NULL) {

          free(psp->ln_pk_nl);

          if (psp->ln_tau_size > 1) {
            free(psp->ddln_pk_nl);
          }
        }
      }

      if (psp->matter_transfer != NULL) {

        free(psp->matter_transfer);
        if (psp->ln_tau_size > 1) {
          free(psp->ddmatter_transfer);
        }
      }
    }
  }

  /* NOTE(review): the frees below run even when md_size == 0, i.e. when
     spectra_init() returned early without calling spectra_indices() and
     these pointers were never allocated — presumably callers only invoke
     spectra_free() after a successful full init; verify */
  for (index_md=0; index_md < psp->md_size; index_md++)
    free(psp->is_non_zero[index_md]);
  free(psp->is_non_zero);
  free(psp->ic_size);
  free(psp->ic_ic_size);

  return _SUCCESS_;

}

/**
 * This routine defines indices and allocates tables in the spectra structure
 *
 * @param pba Input : pointer to background structure (note: missing from the original doc comment)
 * @param ppt Input : pointer to perturbation structure
 * @param ptr Input : pointer to transfers structure
 * @param ppm Input : pointer to primordial structure
 * @param psp Input/output: pointer to spectra structure
 * @return the error status
 */

int spectra_indices(
                    struct background * pba,
                    struct perturbs * ppt,
                    struct transfers * ptr,
                    struct primordial * ppm,
                    struct spectra * psp
                    ){

  int index_ct;
  int index_md;
  int index_ic1_ic2;
  int index_tr;

  psp->md_size = ppt->md_size;
  if (ppt->has_scalars == _TRUE_)
    psp->index_md_scalars = ppt->index_md_scalars;

  class_alloc(psp->ic_size,
              sizeof(int)*psp->md_size,
              psp->error_message);

  class_alloc(psp->ic_ic_size,
              sizeof(int)*psp->md_size,
              psp->error_message);

  class_alloc(psp->is_non_zero,
              sizeof(short *)*psp->md_size,
              psp->error_message);

  /* copy initial-condition bookkeeping (sizes and correlation flags) from the primordial module */
  for (index_md=0; index_md < psp->md_size; index_md++) {
    psp->ic_size[index_md] = ppm->ic_size[index_md];
    psp->ic_ic_size[index_md] = ppm->ic_ic_size[index_md];
    class_alloc(psp->is_non_zero[index_md],
                sizeof(short)*psp->ic_ic_size[index_md],
                psp->error_message);
    for (index_ic1_ic2=0; index_ic1_ic2 < psp->ic_ic_size[index_md]; index_ic1_ic2++)
      psp->is_non_zero[index_md][index_ic1_ic2] = ppm->is_non_zero[index_md][index_ic1_ic2];
  }

  if (ppt->has_cls == _TRUE_) {

    /* types of C_l's relevant for both scalars and tensors: TT, EE, TE */

    index_ct=0;

    if (ppt->has_cl_cmb_temperature == _TRUE_) {
      psp->has_tt = _TRUE_;
      psp->index_ct_tt=index_ct;
      index_ct++;
    }
    else {
      psp->has_tt = _FALSE_;
    }

    if (ppt->has_cl_cmb_polarization == _TRUE_) {
      psp->has_ee = _TRUE_;
      psp->index_ct_ee=index_ct;
      index_ct++;
    }
    else {
      psp->has_ee = _FALSE_;
    }

    if ((ppt->has_cl_cmb_temperature == _TRUE_) &&
        (ppt->has_cl_cmb_polarization == _TRUE_)) {
      psp->has_te = _TRUE_;
      psp->index_ct_te=index_ct;
      index_ct++;
    }
    else {
      psp->has_te = _FALSE_;
    }

    if (ppt->has_cl_cmb_polarization == _TRUE_) {
      psp->has_bb = _TRUE_;
      psp->index_ct_bb=index_ct;
      index_ct++;
    }
    else {
      psp->has_bb = _FALSE_;
    }

    /* types of C_l's relevant only for scalars: phi-phi, T-phi, E-phi, d-d, T-d */

    if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_pp = _TRUE_;
      psp->index_ct_pp=index_ct;
      index_ct++;
    }
    else {
      psp->has_pp = _FALSE_;
    }

    if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_tp = _TRUE_;
      psp->index_ct_tp=index_ct;
      index_ct++;
    }
    else {
      psp->has_tp = _FALSE_;
    }

    /* NOTE(review): this interim assignment is overwritten by the final
       "psp->ct_size = index_ct;" further below; it looks redundant — verify */
    psp->ct_size = index_ct;

    if ((ppt->has_cl_cmb_polarization == _TRUE_) && (ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_ep = _TRUE_;
      psp->index_ct_ep=index_ct;
      index_ct++;
    }
    else {
      psp->has_ep = _FALSE_;
    }

    if ((ppt->has_scalars == _TRUE_) &&
        ((ppt->has_cl_number_count == _TRUE_) || (ppt->has_cl_lensing_potential == _TRUE_)))
      psp->d_size=ppt->selection_num;
    else
      psp->d_size=0;

    if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_dd = _TRUE_;
      psp->index_ct_dd=index_ct;
      /* number of (d_i, d_j) auto/cross pairs within the non_diag band of the symmetric matrix */
      index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
    }
    else {
      psp->has_dd = _FALSE_;
    }

    /* the computation of C_l^Td would require a very good sampling of
       transfer functions over a wide range, and a huge computation
       time. In the current version, we prefer to switch it off, rather
       than either slowing down the code considerably, or producing very
       inaccurate spectra.

       if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
       psp->has_td = _TRUE_;
       psp->index_ct_td=index_ct;
       index_ct+=psp->d_size;
       }
       else {
       psp->has_td = _FALSE_;
       }
    */
    psp->has_td = _FALSE_;

    if ((ppt->has_cl_cmb_lensing_potential == _TRUE_) && (ppt->has_cl_number_count == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_pd = _TRUE_;
      psp->index_ct_pd=index_ct;
      index_ct+=psp->d_size;
    }
    else {
      psp->has_pd = _FALSE_;
    }

    /* NOTE(review): duplicate of the has_td assignment above — harmless but redundant */
    psp->has_td = _FALSE_;

    if ((ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_ll = _TRUE_;
      psp->index_ct_ll=index_ct;
      index_ct+=(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
    }
    else {
      psp->has_ll = _FALSE_;
    }

    /* the computation of C_l^Tl would require a very good sampling of
       transfer functions over a wide range, and a huge computation
       time. In the current version, we prefer to switch it off, rather
       than either slowing down the code considerably, or producing very
       inaccurate spectra.

       if ((ppt->has_cl_cmb_temperature == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
       psp->has_tl = _TRUE_;
       psp->index_ct_tl=index_ct;
       index_ct+=psp->d_size;
       }
       else {
       psp->has_tl = _FALSE_;
       }
    */
    psp->has_tl = _FALSE_;

    if ((ppt->has_cl_number_count == _TRUE_) && (ppt->has_cl_lensing_potential == _TRUE_) && (ppt->has_scalars == _TRUE_)) {
      psp->has_dl = _TRUE_;
      psp->index_ct_dl=index_ct;
      index_ct += psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag);
    }
    else {
      psp->has_dl = _FALSE_;
    }

    psp->ct_size = index_ct;

    /* infer from input quantities the l_max for each mode and type,
       l_max_ct[index_md][index_type].  Maximize it over index_ct, and
       then over index_md. */

    class_alloc(psp->l_max,sizeof(int*)*psp->md_size,psp->error_message);
    class_alloc(psp->l_max_ct,sizeof(int*)*psp->md_size,psp->error_message);
    for (index_md=0; index_md<psp->md_size; index_md++) {
      class_calloc(psp->l_max_ct[index_md],psp->ct_size,sizeof(int),psp->error_message);
    }

    if (ppt->has_scalars == _TRUE_) {

      /* spectra computed up to l_scalar_max */

      if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tt] = ppt->l_scalar_max;
      if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ee] = ppt->l_scalar_max;
      if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_te] = ppt->l_scalar_max;
      if (psp->has_pp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_pp] = ppt->l_scalar_max;
      if (psp->has_tp == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_tp] = ppt->l_scalar_max;
      if (psp->has_ep == _TRUE_) psp->l_max_ct[ppt->index_md_scalars][psp->index_ct_ep] = ppt->l_scalar_max;

      /* spectra computed up to l_lss_max */

      if (psp->has_dd == _TRUE_)
        for (index_ct=psp->index_ct_dd;
             index_ct<psp->index_ct_dd+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;

      if (psp->has_td == _TRUE_)
        for (index_ct=psp->index_ct_td; index_ct<psp->index_ct_td+psp->d_size; index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_pd == _TRUE_)
        for (index_ct=psp->index_ct_pd; index_ct<psp->index_ct_pd+psp->d_size; index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_ll == _TRUE_)
        for (index_ct=psp->index_ct_ll;
             index_ct<psp->index_ct_ll+(psp->d_size*(psp->d_size+1)-(psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag))/2;
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;

      if (psp->has_tl == _TRUE_)
        for (index_ct=psp->index_ct_tl; index_ct<psp->index_ct_tl+psp->d_size; index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = MIN(ppt->l_scalar_max,ppt->l_lss_max);

      if (psp->has_dl == _TRUE_)
        for (index_ct=psp->index_ct_dl;
             index_ct < psp->index_ct_dl+(psp->d_size*psp->d_size - (psp->d_size-psp->non_diag)*(psp->d_size-1-psp->non_diag));
             index_ct++)
          psp->l_max_ct[ppt->index_md_scalars][index_ct] = ppt->l_lss_max;
    }

    if (ppt->has_tensors == _TRUE_) {

      /* spectra computed up to l_tensor_max */

      if (psp->has_tt == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_tt] = ppt->l_tensor_max;
      if (psp->has_ee == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_ee] = ppt->l_tensor_max;
      if (psp->has_te == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_te] = ppt->l_tensor_max;
      if (psp->has_bb == _TRUE_) psp->l_max_ct[ppt->index_md_tensors][psp->index_ct_bb] = ppt->l_tensor_max;
    }

    /* maximizations */
    psp->l_max_tot = 0.;
    for (index_md=0; index_md < psp->md_size; index_md++) {
      psp->l_max[index_md] = 0.;
      for (index_ct=0.; index_ct<psp->ct_size; index_ct++)
        psp->l_max[index_md] = MAX(psp->l_max[index_md],psp->l_max_ct[index_md][index_ct]);
      psp->l_max_tot = MAX(psp->l_max_tot,psp->l_max[index_md]);
    }
  }

  /* indices for species associated with a matter transfer function in Fourier space */

  index_tr=0;
  class_define_index(psp->index_tr_delta_g,ppt->has_source_delta_g,index_tr,1);
  class_define_index(psp->index_tr_delta_b,ppt->has_source_delta_b,index_tr,1);
  class_define_index(psp->index_tr_delta_cdm,ppt->has_source_delta_cdm,index_tr,1);
  class_define_index(psp->index_tr_delta_dcdm,ppt->has_source_delta_dcdm,index_tr,1);
  class_define_index(psp->index_tr_delta_scf,ppt->has_source_delta_scf,index_tr,1);
  class_define_index(psp->index_tr_delta_fld,ppt->has_source_delta_fld,index_tr,1);
  class_define_index(psp->index_tr_delta_ur,ppt->has_source_delta_ur,index_tr,1);
  class_define_index(psp->index_tr_delta_dr,ppt->has_source_delta_dr,index_tr,1);
  class_define_index(psp->index_tr_delta_ncdm1,ppt->has_source_delta_ncdm,index_tr,pba->N_ncdm);
  class_define_index(psp->index_tr_delta_tot,ppt->has_density_transfers,index_tr,1);

  /* indices for species associated with a velocity transfer function in Fourier space */

  class_define_index(psp->index_tr_theta_g,ppt->has_source_theta_g,index_tr,1);
  class_define_index(psp->index_tr_theta_b,ppt->has_source_theta_b,index_tr,1);
  class_define_index(psp->index_tr_theta_cdm,ppt->has_source_theta_cdm,index_tr,1);
  class_define_index(psp->index_tr_theta_dcdm,ppt->has_source_theta_dcdm,index_tr,1);
  class_define_index(psp->index_tr_theta_scf,ppt->has_source_theta_scf,index_tr,1);
  class_define_index(psp->index_tr_theta_fld,ppt->has_source_theta_fld,index_tr,1);
  class_define_index(psp->index_tr_theta_ur,ppt->has_source_theta_ur,index_tr,1);
  /* NOTE(review): the flag below is has_source_theta_ur although the index is
     theta_dr — looks like it should be has_source_theta_dr; verify */
  class_define_index(psp->index_tr_theta_dr,ppt->has_source_theta_ur,index_tr,1);
  class_define_index(psp->index_tr_theta_ncdm1,ppt->has_source_theta_ncdm,index_tr,pba->N_ncdm);
  class_define_index(psp->index_tr_theta_tot,ppt->has_velocity_transfers,index_tr,1);

  psp->tr_size = index_tr;

  return _SUCCESS_;

}

/**
 * This routine computes a table of values for all harmonic spectra C_l's,
 * given the transfer functions and primordial spectra.
 *
 * @param pba Input : pointer to background structure (note: missing from the original doc comment)
 * @param ppt Input : pointer to perturbation structure
 * @param ptr Input : pointer to transfers structure
 * @param ppm Input : pointer to primordial structure
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_cls(
                struct background * pba,
                struct perturbs * ppt,
                struct transfers * ptr,
                struct primordial * ppm,
                struct spectra * psp
                ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_l;
  int index_ct;
  int cl_integrand_num_columns;

  double * cl_integrand; /* array with argument cl_integrand[index_k*cl_integrand_num_columns+1+psp->index_ct] */
  double * transfer_ic1; /* array with argument transfer_ic1[index_tt] */
  double * transfer_ic2; /* idem */
  double * primordial_pk;  /* array with argument primordial_pk[index_ic_ic]*/

  /* This code can be optionally compiled with the openmp option for parallel computation.
     Inside parallel regions, the use of the command "return" is forbidden.
     For error management, instead of "return _FAILURE_", we will set the variable below
     to "abort = _TRUE_". This will lead to a "return _FAILURE_" just after leaving the
     parallel region. */
  int abort;

#ifdef _OPENMP
  /* instrumentation times */
  double tstart, tstop;
#endif

  /** - allocate pointers to arrays where results will be stored */

  class_alloc(psp->l_size,sizeof(int)*psp->md_size,psp->error_message);
  class_alloc(psp->cl,sizeof(double *)*psp->md_size,psp->error_message);
  class_alloc(psp->ddcl,sizeof(double *)*psp->md_size,psp->error_message);

  psp->l_size_max = ptr->l_size_max;
  class_alloc(psp->l,sizeof(double)*psp->l_size_max,psp->error_message);

  /** - store values of l (as doubles, for spline interpolation later) */
  for (index_l=0; index_l < psp->l_size_max; index_l++) {
    psp->l[index_l] = (double)ptr->l[index_l];
  }

  /** - loop over modes (scalar, tensors, etc). For each mode: */

  for (index_md = 0; index_md < psp->md_size; index_md++) {

    /** - a) store number of l values for this mode */

    psp->l_size[index_md] = ptr->l_size[index_md];

    /** - b) allocate arrays where results will be stored */

    class_alloc(psp->cl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);
    class_alloc(psp->ddcl[index_md],sizeof(double)*psp->l_size[index_md]*psp->ct_size*psp->ic_ic_size[index_md],psp->error_message);

    cl_integrand_num_columns = 1+psp->ct_size*2; /* one for k, ct_size for each type, ct_size for each second derivative of each type */

    /** d) loop over initial conditions */

    for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
      for (index_ic2 = index_ic1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {
        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

        /* non-diagonal coefficients should be computed only if non-zero correlation */
        if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

          /* initialize error management flag */
          abort = _FALSE_;

          /* beginning of parallel region */

#pragma omp parallel                                                    \
  shared(ptr,ppm,index_md,psp,ppt,cl_integrand_num_columns,index_ic1,index_ic2,abort) \
  private(tstart,cl_integrand,primordial_pk,transfer_ic1,transfer_ic2,index_l,tstop)

          {

#ifdef _OPENMP
            tstart = omp_get_wtime();
#endif

            /* per-thread workspaces (private pointers, parallel-safe allocation macro) */
            class_alloc_parallel(cl_integrand,
                                 ptr->q_size*cl_integrand_num_columns*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(primordial_pk,
                                 psp->ic_ic_size[index_md]*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(transfer_ic1,
                                 ptr->tt_size[index_md]*sizeof(double),
                                 psp->error_message);

            class_alloc_parallel(transfer_ic2,
                                 ptr->tt_size[index_md]*sizeof(double),
                                 psp->error_message);

#pragma omp for schedule (dynamic)

            /** - loop over l values defined in the transfer module.
                For each l, compute the C_l's for all types (TT, TE, ...)
                by convolving primordial spectra with transfer functions.
                This elementary task is assigned to spectra_compute_cl() */

            for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {

#pragma omp flush(abort)

              class_call_parallel(spectra_compute_cl(pba,
                                                     ppt,
                                                     ptr,
                                                     ppm,
                                                     psp,
                                                     index_md,
                                                     index_ic1,
                                                     index_ic2,
                                                     index_l,
                                                     cl_integrand_num_columns,
                                                     cl_integrand,
                                                     primordial_pk,
                                                     transfer_ic1,
                                                     transfer_ic2),
                                  psp->error_message,
                                  psp->error_message);

            } /* end of loop over l */

#ifdef _OPENMP
            tstop = omp_get_wtime();
            if (psp->spectra_verbose > 1)
              printf("In %s: time spent in parallel region (loop over l's) = %e s for thread %d\n",
                     __func__,tstop-tstart,omp_get_thread_num());
#endif

            free(cl_integrand);
            free(primordial_pk);
            free(transfer_ic1);
            free(transfer_ic2);

          } /* end of parallel region */

          if (abort == _TRUE_) return _FAILURE_;

        }
        else {

          /* set non-diagonal coefficients to zero if pair of ic's uncorrelated */

          for (index_l=0; index_l < ptr->l_size[index_md]; index_l++) {
            for (index_ct=0; index_ct<psp->ct_size; index_ct++) {
              psp->cl[index_md]
                [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
                = 0.;
            }
          }
        }
      }
    }

    /** - e) now that for a given mode, all possible C_l's have been
        computed, compute second derivative of the array in which they
        are stored, in view of spline interpolation. */

    class_call(array_spline_table_lines(psp->l,
                                        psp->l_size[index_md],
                                        psp->cl[index_md],
                                        psp->ic_ic_size[index_md]*psp->ct_size,
                                        psp->ddcl[index_md],
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);
  }

  return _SUCCESS_;

}

/**
 * This routine computes the C_l's for a given mode, pair of initial conditions
 * and multipole, but for all types (TT, TE...), by convolving the
 * transfer functions with the primordial spectra.
 *
 * @param ppt                     Input : pointer to perturbation structure
 * @param ptr                     Input : pointer to transfers structure
 * @param ppm                     Input : pointer to primordial structure
 * @param psp                     Input/Output: pointer to spectra structure (result stored here)
 * @param index_md                Input : index of mode under consideration
 * @param index_ic1               Input : index of first initial condition in the correlator
 * @param index_ic2               Input : index of second initial condition in the correlator
 * @param index_l                 Input : index of multipole under consideration
 * @param cl_integrand_num_columns Input : number of columns in cl_integrand
 * @param cl_integrand            Input : an allocated workspace
 * @param primordial_pk           Input : table of primordial spectrum values
 * @param transfer_ic1            Input : table of transfer function values for first initial condition
 * @param transfer_ic2            Input : table of transfer function values for second initial condition
 * @return the error status
 */

int spectra_compute_cl(
                       struct background * pba,
                       struct perturbs * ppt,
                       struct transfers * ptr,
                       struct primordial * ppm,
                       struct spectra * psp,
                       int index_md,
                       int index_ic1,
                       int index_ic2,
                       int index_l,
                       int cl_integrand_num_columns,
                       double * cl_integrand,
                       double * primordial_pk,
                       double * transfer_ic1,
                       double * transfer_ic2
                       ) {

  int index_q;
  int index_tt;
  int index_ct;
  int index_d1,index_d2;
  double k;
  double clvalue;
  int index_ic1_ic2;
  double transfer_ic1_temp=0.;
  double transfer_ic2_temp=0.;
  double * transfer_ic1_nc=NULL;
  double * transfer_ic2_nc=NULL;
  double factor;
  int index_q_spline=0;

  /* position of the (ic1,ic2) pair inside the packed symmetric ic x ic matrix */
  index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

  if (ppt->has_cl_number_count == _TRUE_) {
    class_alloc(transfer_ic1_nc,psp->d_size*sizeof(double),psp->error_message);
    class_alloc(transfer_ic2_nc,psp->d_size*sizeof(double),psp->error_message);
  }

  /* fill the integrand table: one row per wavenumber q */
  for (index_q=0; index_q < ptr->q_size; index_q++) {

    //q = ptr->q[index_q];
    k = ptr->k[index_md][index_q];

    cl_integrand[index_q*cl_integrand_num_columns+0] = k;

    class_call(primordial_spectrum_at_k(ppm,index_md,linear,k,primordial_pk),
               ppm->error_message,
               psp->error_message);

    /* above routine checks that k>0: no possible division by zero below */

    /* read all transfer functions for this (mode, l, q), for both initial conditions */
    for (index_tt=0; index_tt < ptr->tt_size[index_md]; index_tt++) {

      transfer_ic1[index_tt] =
        ptr->transfer[index_md]
        [((index_ic1 * ptr->tt_size[index_md] + index_tt)
          * ptr->l_size[index_md] + index_l)
         * ptr->q_size + index_q];

      if (index_ic1 == index_ic2) {
        transfer_ic2[index_tt] = transfer_ic1[index_tt];
      }
      else {
        transfer_ic2[index_tt] = ptr->transfer[index_md]
          [((index_ic2 * ptr->tt_size[index_md] + index_tt)
            * ptr->l_size[index_md] + index_l)
           * ptr->q_size + index_q];
      }
    }

    /* define combinations of transfer functions */

    if (ppt->has_cl_cmb_temperature == _TRUE_) {

      if (_scalars_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t0] + transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t0] + transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];

      }

      if (_vectors_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t1] + transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t1] + transfer_ic2[ptr->index_tt_t2];

      }

      if (_tensors_) {

        transfer_ic1_temp = transfer_ic1[ptr->index_tt_t2];
        transfer_ic2_temp = transfer_ic2[ptr->index_tt_t2];

      }
    }

    /* number-count transfer: sum the requested contributions (density, RSD,
       lensing, relativistic/GR terms) for each redshift bin index_d1 */
    if (ppt->has_cl_number_count == _TRUE_) {

      for (index_d1=0; index_d1<psp->d_size; index_d1++) {

        transfer_ic1_nc[index_d1] = 0.;
        transfer_ic2_nc[index_d1] = 0.;

        if (ppt->has_nc_density == _TRUE_) {
          transfer_ic1_nc[index_d1] += transfer_ic1[ptr->index_tt_density+index_d1];
          transfer_ic2_nc[index_d1] += transfer_ic2[ptr->index_tt_density+index_d1];
        }

        if (ppt->has_nc_rsd == _TRUE_) {
          transfer_ic1_nc[index_d1]
            += transfer_ic1[ptr->index_tt_rsd+index_d1]
            + transfer_ic1[ptr->index_tt_d0+index_d1]
            + transfer_ic1[ptr->index_tt_d1+index_d1];
          transfer_ic2_nc[index_d1]
            += transfer_ic2[ptr->index_tt_rsd+index_d1]
            + transfer_ic2[ptr->index_tt_d0+index_d1]
            + transfer_ic2[ptr->index_tt_d1+index_d1];
        }

        if (ppt->has_nc_lens == _TRUE_) {
          transfer_ic1_nc[index_d1] +=
            psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic1[ptr->index_tt_nc_lens+index_d1];
          transfer_ic2_nc[index_d1] +=
            psp->l[index_l]*(psp->l[index_l]+1.)*transfer_ic2[ptr->index_tt_nc_lens+index_d1];
        }

        if (ppt->has_nc_gr == _TRUE_) {
          transfer_ic1_nc[index_d1]
            += transfer_ic1[ptr->index_tt_nc_g1+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g2+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g3+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g4+index_d1]
            + transfer_ic1[ptr->index_tt_nc_g5+index_d1];
          transfer_ic2_nc[index_d1]
            += transfer_ic2[ptr->index_tt_nc_g1+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g2+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g3+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g4+index_d1]
            + transfer_ic2[ptr->index_tt_nc_g5+index_d1];
        }

      }
    }

    /* integrand of Cl's */

    /* note: we must integrate

       C_l = int [4 pi dk/k calP(k) Delta1_l(q) Delta2_l(q)]

       where calP(k) is the dimensionless power spectrum equal to a
       constant in the scale-invariant case, and to P(k) = A_s k^(ns-1)
       otherwise, and q=sqrt(k2+K) (scalars) or sqrt(k2+2K) (vectors)
       or sqrt(k2+3K) (tensors).

       In the literature, people often rewrite the integral in terms
       of q and absorb the Jacobian of the change of variables in a
       redefinition of the primordial spectrum:

       dk/k = kdk/k2 = qdq/k2 = dq/q * (q/k)^2 = dq/q * [q2/(q2-K)] = q2dq * 1/[q(q2-K)]

       so that the factor 1/[q(q2-K)] is commonly absorbed in the
       definition of calP, and sometimes the factor (k2-3K)=(q2-4K)
       present in the initial conditions of scalar transfer functions
       (if normalized to curvature R=1) as well. In CLASS we prefer to
       define calP like in the flat case, keep the factor (q2-4K) in
       the initial conditions, and integrate over dk/k, so neither
       factor appears here. For tensors the analogous change of
       variable gives 1/[q(q2-3K)], plus extra curvature-related
       correction factors handled in the perturbation module (see the
       comments there on tensor initial conditions). */

    factor = 4. * _PI_ / k;

    if (psp->has_tt == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tt]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1_temp
        * transfer_ic2_temp
        * factor;

    if (psp->has_ee == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ee]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_e]
        * transfer_ic2[ptr->index_tt_e]
        * factor;

    /* cross-spectra are symmetrized over the two initial conditions (0.5*(ab+ba)) */
    if (psp->has_te == _TRUE_)
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_te]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_e] +
               transfer_ic1[ptr->index_tt_e] * transfer_ic2_temp)
        * factor;

    if (_tensors_ && (psp->has_bb == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_bb]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_b]
        * transfer_ic2[ptr->index_tt_b]
        * factor;

    if (_scalars_ && (psp->has_pp == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pp]=
        primordial_pk[index_ic1_ic2]
        * transfer_ic1[ptr->index_tt_lcmb]
        * transfer_ic2[ptr->index_tt_lcmb]
        * factor;

    if (_scalars_ && (psp->has_tp == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tp]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lcmb] +
               transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_temp)
        * factor;

    if (_scalars_ && (psp->has_ep == _TRUE_))
      cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ep]=
        primordial_pk[index_ic1_ic2]
        * 0.5*(transfer_ic1[ptr->index_tt_e] * transfer_ic2[ptr->index_tt_lcmb] +
               transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2[ptr->index_tt_e])
        * factor;

    /* density x density: only pairs within psp->non_diag of the diagonal are stored */
    if (_scalars_ && (psp->has_dd == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dd+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1_nc[index_d1]
            * transfer_ic2_nc[index_d2]
            * factor;
          index_ct++;
        }
      }
    }

    if (_scalars_ && (psp->has_td == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_td+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1_temp * transfer_ic2_nc[index_d1] +
                 transfer_ic1_nc[index_d1] * transfer_ic2_temp)
          * factor;
      }
    }

    if (_scalars_ && (psp->has_pd == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_pd+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1[ptr->index_tt_lcmb] * transfer_ic2_nc[index_d1] +
                 transfer_ic1_nc[index_d1] * transfer_ic2[ptr->index_tt_lcmb])
          * factor;
      }
    }

    if (_scalars_ && (psp->has_ll == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=index_d1; index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_ll+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1[ptr->index_tt_lensing+index_d1]
            * transfer_ic2[ptr->index_tt_lensing+index_d2]
            * factor;
          index_ct++;
        }
      }
    }

    if (_scalars_ && (psp->has_tl == _TRUE_)) {
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_tl+index_d1]=
          primordial_pk[index_ic1_ic2]
          * 0.5*(transfer_ic1_temp * transfer_ic2[ptr->index_tt_lensing+index_d1] +
                 transfer_ic1[ptr->index_tt_lensing+index_d1] * transfer_ic2_temp)
          * factor;
      }
    }

    /* density x lensing: note the band runs from -non_diag to +non_diag here
       (asymmetric pairs are distinct), unlike the dd/ll cases above */
    if (_scalars_ && (psp->has_dl == _TRUE_)) {
      index_ct=0;
      for (index_d1=0; index_d1<psp->d_size; index_d1++) {
        for (index_d2=MAX(index_d1-psp->non_diag,0); index_d2<=MIN(index_d1+psp->non_diag,psp->d_size-1); index_d2++) {
          cl_integrand[index_q*cl_integrand_num_columns+1+psp->index_ct_dl+index_ct]=
            primordial_pk[index_ic1_ic2]
            * transfer_ic1_nc[index_d1]
            * transfer_ic2[ptr->index_tt_lensing+index_d2]
            * factor;
          index_ct++;
        }
      }
    }

  }

  /* now integrate each column of the integrand table over q */
  for (index_ct=0; index_ct<psp->ct_size; index_ct++) {

    /* treat null spectra (C_l^BB of scalars, C_l^pp of tensors, etc.) */

    if ((_scalars_ && (psp->has_bb == _TRUE_) && (index_ct == psp->index_ct_bb)) ||
        (_tensors_ && (psp->has_pp == _TRUE_) && (index_ct == psp->index_ct_pp)) ||
        (_tensors_ && (psp->has_tp == _TRUE_) && (index_ct == psp->index_ct_tp)) ||
        (_tensors_ && (psp->has_ep == _TRUE_) && (index_ct == psp->index_ct_ep)) ||
        (_tensors_ && (psp->has_dd == _TRUE_) && (index_ct == psp->index_ct_dd)) ||
        (_tensors_ && (psp->has_td == _TRUE_) && (index_ct == psp->index_ct_td)) ||
        (_tensors_ && (psp->has_pd == _TRUE_) && (index_ct == psp->index_ct_pd)) ||
        (_tensors_ && (psp->has_ll == _TRUE_) && (index_ct == psp->index_ct_ll)) ||
        (_tensors_ && (psp->has_tl == _TRUE_) && (index_ct == psp->index_ct_tl)) ||
        (_tensors_ && (psp->has_dl == _TRUE_) && (index_ct == psp->index_ct_dl))
        ) {

      psp->cl[index_md]
        [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct] = 0.;

    }
    /* for non-zero spectra, integrate over q */
    else {

      /* spline the integrand over the whole range of k's */

      class_call(array_spline(cl_integrand,
                              cl_integrand_num_columns,
                              ptr->q_size,
                              0,
                              1+index_ct,
                              1+psp->ct_size+index_ct,
                              _SPLINE_EST_DERIV_,
                              psp->error_message),
                 psp->error_message,
                 psp->error_message);

      /* Technical point: we will now do a spline integral over the
         whole range of k's, excepted in the closed (K>0) case. In that
         case, it is a bad idea to spline over the values of k
         corresponding to nu<nu_flat_approximation: there nu values are
         integers, so the steps dq and dk have discrete jumps, which
         makes the spline less accurate than a trapezoidal integral
         with finer sampling. So, in the closed case, we set
         index_q_spline to ptr->index_q_flat_approximation, to tell the
         integration routine to treat the integral as trapezoidal below
         this index. For testing, one is free to set index_q_spline to
         0 (spline everywhere) or to (ptr->q_size-1) (trapezoidal
         everywhere). */

      if (pba->sgnK == 1) {
        index_q_spline = ptr->index_q_flat_approximation;
      }

      class_call(array_integrate_all_trapzd_or_spline(cl_integrand,
                                                      cl_integrand_num_columns,
                                                      ptr->q_size,
                                                      index_q_spline,
                                                      0,
                                                      1+index_ct,
                                                      1+psp->ct_size+index_ct,
                                                      &clvalue,
                                                      psp->error_message),
                 psp->error_message,
                 psp->error_message);

      /* in the closed case, instead of an integral, we have a discrete
         sum. In practice, this does not matter: the previous routine
         gives a correct approximation of the discrete sum, both in the
         trapezoidal and spline regions. The only error comes from the
         first point, whose weight is too small compared to what it
         would be in an actual discrete sum. The line below corrects
         this problem in an exact way. */

      if (pba->sgnK == 1) {
        clvalue += cl_integrand[1+index_ct] * ptr->q[0]/ptr->k[0][0]*sqrt(pba->K)/2.;
      }

      /* we have the correct C_l now. We can store it in the transfer structure. */

      psp->cl[index_md]
        [(index_l * psp->ic_ic_size[index_md] + index_ic1_ic2) * psp->ct_size + index_ct]
        = clvalue;

    }
  }

  if (ppt->has_cl_number_count == _TRUE_) {
    free(transfer_ic1_nc);
    free(transfer_ic2_nc);
  }

  return _SUCCESS_;

}

/**
 * This routine computes the values of k and tau at which the matter
 * power spectra P(k,tau) and the matter transfer functions T_i(k,tau)
 * will be stored.
 *
 * @param pba Input : pointer to background structure (for z to tau conversion)
 * @param ppt Input : pointer to perturbation structure (contain source functions)
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_k_and_tau(
                      struct background * pba,
                      struct perturbs * ppt,
                      struct spectra * psp
                      ) {

  /** Summary: */

  /** - define local variables */

  int index_k;
  int index_tau;
  double tau_min;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  /** - check the maximum redshift z_max_pk at which P(k,z) and T_i(k,z) should
      be computable by interpolation. If it is equal to zero, only P(k,z=0)
      needs to be computed. If it is higher, we will store in a table various
      P(k,tau) at several values of tau generously encompassing the range
      0<z<z_max_pk */

  /* if z_max_pk<0, return error */
  class_test((psp->z_max_pk < 0),
             psp->error_message,
             "asked for negative redshift z=%e",psp->z_max_pk);

  /* if z_max_pk=0, there is just one value to store */
  if (psp->z_max_pk == 0.) {
    psp->ln_tau_size=1;
  }

  /* if z_max_pk>0, store several values (with a confortable margin above
     z_max_pk) in view of interpolation */
  else{

    /* find the first relevant value of tau (last value in the table
       tau_sampling before tau(z_max)) and infer the number of values of tau at
       which P(k) must be stored */

    class_call(background_tau_of_z(pba,psp->z_max_pk,&tau_min),
               pba->error_message,
               psp->error_message);

    index_tau=0;
    class_test((tau_min < ppt->tau_sampling[index_tau]),
               psp->error_message,
               "you asked for zmax=%e, i.e. taumin=%e, smaller than first possible value =%e",psp->z_max_pk,tau_min,ppt->tau_sampling[0]);

    /* advance to the first sampled tau at or above tau_min, then step back one
       so that tau_min is bracketed */
    while (ppt->tau_sampling[index_tau] < tau_min){
      index_tau++;
    }
    index_tau --;

    /* whenever possible, take a few more values in to avoid boundary effects
       in the interpolation */
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;
    if (index_tau>0) index_tau--;
    psp->ln_tau_size=ppt->tau_size-index_tau;

  }

  /** - allocate and fill table of tau values at which P(k,tau) and T_i(k,tau)
      are stored (the last ln_tau_size entries of ppt->tau_sampling, in log) */

  class_alloc(psp->ln_tau,sizeof(double)*psp->ln_tau_size,psp->error_message);

  for (index_tau=0; index_tau<psp->ln_tau_size; index_tau++) {
    psp->ln_tau[index_tau]=log(ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size]);
  }

  /** - allocate and fill table of k values at which P(k,tau) is stored */

  psp->ln_k_size = ppt->k_size[ppt->index_md_scalars];
  class_alloc(psp->ln_k,sizeof(double)*psp->ln_k_size,psp->error_message);

  for (index_k=0; index_k<psp->ln_k_size; index_k++) {
    class_test(ppt->k[ppt->index_md_scalars][index_k] <= 0.,
               psp->error_message,
               "stop to avoid segmentation fault");
    psp->ln_k[index_k]=log(ppt->k[ppt->index_md_scalars][index_k]);
  }

  return _SUCCESS_;
}

/**
 * This routine computes a table of values for all matter power spectra P(k),
 * given the source functions and primordial spectra.
 *
 * @param pba Input : pointer to background structure (will provide H, Omega_m at redshift of interest)
 * @param ppt Input : pointer to perturbation structure (contain source functions)
 * @param ppm Input : pointer to primordial structure
 * @param pnl Input : pointer to nonlinear structure
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_pk(
               struct background * pba,
               struct perturbs * ppt,
               struct primordial * ppm,
               struct nonlinear *pnl,
               struct spectra * psp
               ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic1,index_ic2,index_ic1_ic2;
  int index_k;
  int index_tau;
  double * primordial_pk; /* array with argument primordial_pk[index_ic_ic] */
  double source_ic1;
  double source_ic2;
  double ln_pk_tot;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate temporary vectors where the primordial spectrum and the
      background quantitites will be stored */

  class_alloc(primordial_pk,psp->ic_ic_size[index_md]*sizeof(double),psp->error_message);

  /** - allocate and fill array of P(k,tau) values */

  class_alloc(psp->ln_pk,
              sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],
              psp->error_message);

  if (pnl->method != nl_none) {
    class_alloc(psp->ln_pk_nl,
                sizeof(double)*psp->ln_tau_size*psp->ln_k_size,
                psp->error_message);
  }
  else {
    psp->ln_pk_nl = NULL;
  }

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      class_call(primordial_spectrum_at_k(ppm,index_md,logarithmic,psp->ln_k[index_k],primordial_pk),
                 ppm->error_message,
                 psp->error_message);

      ln_pk_tot =0;

      /* curvature primordial spectrum:
         P_R(k) = 1/(2pi^2) k^3 <R R>
         so, primordial curvature correlator:
         <R R> = (2pi^2) k^-3 P_R(k)
         so, delta_m correlator:
         P(k) = <delta_m delta_m> = (2pi^2) k^-3 (source_m)^2 P_R(k)

         For isocurvature or cross adiabatic-isocurvature parts,
         replace one or two 'R' by 'S_i's */

      /* part diagonal in initial conditions */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {

        index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic1,psp->ic_size[index_md]);

        source_ic1 = ppt->sources[index_md]
          [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
          [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

        psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
          log(2.*_PI_*_PI_/exp(3.*psp->ln_k[index_k])
              *source_ic1*source_ic1
              *exp(primordial_pk[index_ic1_ic2]));

        ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

      }

      /* part non-diagonal in initial conditions.
         NOTE(review): off-diagonal entries store a dimensionless correlation
         (primordial_pk in logarithmic mode times the two source signs), NOT a
         log of a power spectrum like the diagonal entries — consumers of
         ln_pk must treat the two cases differently; confirm against the
         routines reading psp->ln_pk. */
      for (index_ic1 = 0; index_ic1 < psp->ic_size[index_md]; index_ic1++) {
        for (index_ic2 = index_ic1+1; index_ic2 < psp->ic_size[index_md]; index_ic2++) {

          index_ic1_ic2 = index_symmetric_matrix(index_ic1,index_ic2,psp->ic_size[index_md]);

          if (psp->is_non_zero[index_md][index_ic1_ic2] == _TRUE_) {

            source_ic1 = ppt->sources[index_md]
              [index_ic1 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            source_ic2 = ppt->sources[index_md]
              [index_ic2 * ppt->tp_size[index_md] + ppt->index_tp_delta_m]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] =
              primordial_pk[index_ic1_ic2]*SIGN(source_ic1)*SIGN(source_ic2);

            ln_pk_tot += psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2];

          }
          else {
            psp->ln_pk[(index_tau * psp->ln_k_size + index_k)* psp->ic_ic_size[index_md] + index_ic1_ic2] = 0.;
          }
        }
      }

      /* if non-linear corrections required, compute the total non-linear
         matter power spectrum */

      if (pnl->method != nl_none) {
        psp->ln_pk_nl[index_tau * psp->ln_k_size + index_k] =
          ln_pk_tot
          + 2.*log(pnl->nl_corr_density[(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k]);
      }

    }
  }

  /**- if interpolation of P(k,tau) will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (psp->ln_tau_size > 1) {

    class_alloc(psp->ddln_pk,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

    class_call(array_spline_table_lines(psp->ln_tau,
                                        psp->ln_tau_size,
                                        psp->ln_pk,
                                        psp->ic_ic_size[index_md]*psp->ln_k_size,
                                        psp->ddln_pk,
                                        _SPLINE_EST_DERIV_,
                                        psp->error_message),
               psp->error_message,
               psp->error_message);

  }

  /* compute sigma8 (mean variance today in sphere of radius 8/h Mpc) */

  class_call(spectra_sigma(pba,ppm,psp,8./pba->h,0.,&(psp->sigma8)),
             psp->error_message,
             psp->error_message);

  if (psp->spectra_verbose>0)
    fprintf(stdout," -> sigma8=%g (computed till k = %g h/Mpc)\n",
            psp->sigma8,
            exp(psp->ln_k[psp->ln_k_size-1])/pba->h);

  /**- if interpolation of P_NL(k,tau) will be needed (as a function of tau),
     compute array of second derivatives in view of spline interpolation */

  if (pnl->method != nl_none) {
    if (psp->ln_tau_size > 1) {

      class_alloc(psp->ddln_pk_nl,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_ic_size[index_md],psp->error_message);

      class_call(array_spline_table_lines(psp->ln_tau,
                                          psp->ln_tau_size,
                                          psp->ln_pk_nl,
                                          psp->ln_k_size,
                                          psp->ddln_pk_nl,
                                          _SPLINE_EST_DERIV_,
                                          psp->error_message),
                 psp->error_message,
                 psp->error_message);

    }
  }

  free (primordial_pk);

  return _SUCCESS_;
}

/**
 * This routine computes sigma(R) given P(k) (does not check that k_max is
 * large enough)
 *
 * @param pba   Input: pointer to background structure
 * @param ppm   Input: pointer to primordial structure
 * @param psp   Input: pointer to spectra structure
 * @param R     Input: radius in Mpc
 * @param z     Input: redshift
 * @param sigma Output: variance in a sphere of radius R (dimensionless)
 */

int spectra_sigma(
                  struct background * pba,
                  struct primordial * ppm,
                  struct
spectra * psp, double R, double z, double * sigma ) { double pk; double * pk_ic = NULL; double * array_for_sigma; int index_num; int index_k; int index_y; int index_ddy; int i; double k,W,x; if (psp->ic_ic_size[psp->index_md_scalars]>1) class_alloc(pk_ic, psp->ic_ic_size[psp->index_md_scalars]*sizeof(double), psp->error_message); i=0; index_k=i; i++; index_y=i; i++; index_ddy=i; i++; index_num=i; class_alloc(array_for_sigma, psp->ln_k_size*index_num*sizeof(double), psp->error_message); for (i=0;i<psp->ln_k_size;i++) { k=exp(psp->ln_k[i]); if (i == (psp->ln_k_size-1)) k *= 0.9999999; // to prevent rounding error leading to k being bigger than maximum value x=k*R; W=3./x/x/x*(sin(x)-x*cos(x)); class_call(spectra_pk_at_k_and_z(pba,ppm,psp,k,z,&pk,pk_ic), psp->error_message, psp->error_message); array_for_sigma[i*index_num+index_k]=k; array_for_sigma[i*index_num+index_y]=k*k*pk*W*W; } class_call(array_spline(array_for_sigma, index_num, psp->ln_k_size, index_k, index_y, index_ddy, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); class_call(array_integrate_all_spline(array_for_sigma, index_num, psp->ln_k_size, index_k, index_y, index_ddy, sigma, psp->error_message), psp->error_message, psp->error_message); free(array_for_sigma); if (psp->ic_ic_size[psp->index_md_scalars]>1) free(pk_ic); *sigma = sqrt(*sigma/(2.*_PI_*_PI_)); return _SUCCESS_; } /** * This routine computes a table of values for all matter power spectra P(k), * given the source functions and primordial spectra. 
 *
 * @param pba Input : pointer to background structure (will provide density of each species)
 * @param ppt Input : pointer to perturbation structure (contain source functions)
 * @param psp Input/Output: pointer to spectra structure
 * @return the error status
 */

int spectra_matter_transfers(
                             struct background * pba,
                             struct perturbs * ppt,
                             struct spectra * psp
                             ) {

  /** Summary: */

  /** - define local variables */

  int index_md;
  int index_ic;
  int index_k;
  int index_tau;
  int last_index_back;
  double * pvecback_sp_long; /* array with argument pvecback_sp_long[pba->index_bg] */
  double delta_i,theta_i,rho_i;
  double delta_rho_tot,rho_tot;
  double rho_plus_p_theta_tot,rho_plus_p_tot;
  int n_ncdm;

  /** - check the presence of scalar modes */

  class_test((ppt->has_scalars == _FALSE_),
             psp->error_message,
             "you cannot ask for matter power spectrum since you turned off scalar modes");

  index_md = psp->index_md_scalars;

  /** - allocate and fill array of T_i(k,tau) values */

  class_alloc(psp->matter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message);

  /** - allocate temporary vectors where the background quantitites will be stored */

  class_alloc(pvecback_sp_long,pba->bg_size*sizeof(double),psp->error_message);

  for (index_tau=0 ; index_tau < psp->ln_tau_size; index_tau++) {

    class_call(background_at_tau(pba,
                                 ppt->tau_sampling[index_tau-psp->ln_tau_size+ppt->tau_size],
                                 /* for this last argument we could have passed
                                    exp(psp->ln_tau[index_tau]) but we would
                                    then loose precision in the exp(log(x))
                                    operation */
                                 pba->long_info,
                                 pba->inter_normal,
                                 &last_index_back,
                                 pvecback_sp_long),
               pba->error_message,
               psp->error_message);

    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {

        /* running totals for the density-weighted total transfer functions */
        delta_rho_tot=0.;
        rho_tot=0.;
        rho_plus_p_theta_tot=0.;
        rho_plus_p_tot=0.;

        /* T_g(k,tau) */

        rho_i = pvecback_sp_long[pba->index_bg_rho_g];

        /* NOTE(review): for photons the rho_tot / rho_plus_p_tot accumulation
           sits INSIDE the has_source_* guards, whereas for all other species
           it sits outside — confirm this asymmetry is intended */
        if (ppt->has_source_delta_g == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_g] = delta_i;

          delta_rho_tot += rho_i * delta_i;

          rho_tot += rho_i;
        }

        if (ppt->has_source_theta_g == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_g]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_g] = theta_i;

          rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_b(k,tau) */

        rho_i = pvecback_sp_long[pba->index_bg_rho_b];

        if (ppt->has_source_delta_b == _TRUE_) {

          delta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_b] = delta_i;

          delta_rho_tot += rho_i * delta_i;
        }

        rho_tot += rho_i;

        if (ppt->has_source_theta_b == _TRUE_) {

          theta_i = ppt->sources[index_md]
            [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_b]
            [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_b] = theta_i;

          rho_plus_p_theta_tot += rho_i * theta_i;
        }

        rho_plus_p_tot += rho_i;

        /* T_cdm(k,tau) */

        if (pba->has_cdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_cdm];

          if (ppt->has_source_delta_cdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_cdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_cdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_cdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_cdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;
          }

          rho_plus_p_tot += rho_i;
        }

        /* T_dcdm(k,tau) */

        if (pba->has_dcdm == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dcdm];

          if (ppt->has_source_delta_dcdm == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dcdm] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dcdm == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dcdm]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dcdm] = theta_i;

            rho_plus_p_theta_tot += rho_i * theta_i;
          }

          rho_plus_p_tot += rho_i;
        }

        /* T_scf(k,tau) — scalar field: pressure is not negligible, so the
           velocity weights use (rho + p) explicitly */

        if (pba->has_scf == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_scf];

          if (ppt->has_source_delta_scf == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_scf] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_scf == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_scf]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_scf] = theta_i;

            rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]) * theta_i;
          }

          rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_scf]);
        }

        /* T_fld(k,tau) — fluid dark energy with CPL equation of state
           w(a) = w0 + wa (1 - a/a_today), hence the (1+w) weights below */

        if (pba->has_fld == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_fld];

          if (ppt->has_source_delta_fld == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_fld] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_fld == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_fld]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_fld] = theta_i;

            rho_plus_p_theta_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i * theta_i;
          }

          rho_plus_p_tot += (1. + pba->w0_fld + pba->wa_fld * (1. - pvecback_sp_long[pba->index_bg_a] / pba->a_today)) * rho_i;
        }

        /* T_ur(k,tau) */

        if (pba->has_ur == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_ur];

          if (ppt->has_source_delta_ur == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ur] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_ur == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ur]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ur] = theta_i;

            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
          }

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_dr(k,tau) */

        if (pba->has_dr == _TRUE_) {

          rho_i = pvecback_sp_long[pba->index_bg_rho_dr];

          if (ppt->has_source_delta_dr == _TRUE_) {

            delta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_dr] = delta_i;

            delta_rho_tot += rho_i * delta_i;
          }

          rho_tot += rho_i;

          if (ppt->has_source_theta_dr == _TRUE_) {

            theta_i = ppt->sources[index_md]
              [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_dr]
              [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

            psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_dr] = theta_i;

            rho_plus_p_theta_tot += 4./3. * rho_i * theta_i;
          }

          rho_plus_p_tot += 4./3. * rho_i;
        }

        /* T_ncdm_i(k,tau) — one transfer function per ncdm species */

        if (pba->has_ncdm == _TRUE_) {

          for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) {

            rho_i = pvecback_sp_long[pba->index_bg_rho_ncdm1+n_ncdm];

            if (ppt->has_source_delta_ncdm == _TRUE_) {

              delta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_delta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_ncdm1+n_ncdm] = delta_i;

              delta_rho_tot += rho_i * delta_i;
            }

            rho_tot += rho_i;

            if (ppt->has_source_theta_ncdm == _TRUE_) {

              theta_i = ppt->sources[index_md]
                [index_ic * ppt->tp_size[index_md] + ppt->index_tp_theta_ncdm1+n_ncdm]
                [(index_tau-psp->ln_tau_size+ppt->tau_size) * ppt->k_size[index_md] + index_k];

              psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_ncdm1+n_ncdm] = theta_i;

              rho_plus_p_theta_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]) * theta_i;
            }

            rho_plus_p_tot += (rho_i + pvecback_sp_long[pba->index_bg_p_ncdm1+n_ncdm]);
          }
        }

        /* could include homogeneous component in rho_tot if uncommented
           (leave commented to match CMBFAST/CAMB definition) */

        /* if (pba->has_lambda == _TRUE_) { */
        /*   rho_i = pvecback_sp_long[pba->index_bg_rho_lambda]; */
        /*   rho_tot += rho_i; */
        /* } */

        /* T_tot(k,tau) */

        if (ppt->has_density_transfers == _TRUE_) {
          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_delta_tot] = delta_rho_tot/rho_tot;
        }

        if (ppt->has_velocity_transfers == _TRUE_) {
          psp->matter_transfer[((index_tau*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + psp->index_tr_theta_tot] = rho_plus_p_theta_tot/rho_plus_p_tot;
        }

      }
    }
  }

  /**- if interpolation of T_i(k,tau) will be needed (as a function of tau),
     compute array of second derivatives in view of
spline interpolation */ if (psp->ln_tau_size > 1) { class_alloc(psp->ddmatter_transfer,sizeof(double)*psp->ln_tau_size*psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size,psp->error_message); class_call(array_spline_table_lines(psp->ln_tau, psp->ln_tau_size, psp->matter_transfer, psp->ic_size[index_md]*psp->ln_k_size*psp->tr_size, psp->ddmatter_transfer, _SPLINE_EST_DERIV_, psp->error_message), psp->error_message, psp->error_message); } free (pvecback_sp_long); return _SUCCESS_; } int spectra_output_tk_titles(struct background *pba, struct perturbs *ppt, enum file_format output_format, char titles[_MAXTITLESTRINGLENGTH_] ){ int n_ncdm; char tmp[40]; if (output_format == class_format) { class_store_columntitle(titles,"k (h/Mpc)",_TRUE_); if (ppt->has_density_transfers == _TRUE_) { class_store_columntitle(titles,"d_g",_TRUE_); class_store_columntitle(titles,"d_b",_TRUE_); class_store_columntitle(titles,"d_cdm",pba->has_cdm); class_store_columntitle(titles,"d_fld",pba->has_fld); class_store_columntitle(titles,"d_ur",pba->has_ur); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { sprintf(tmp,"d_ncdm[%d]",n_ncdm); class_store_columntitle(titles,tmp,_TRUE_); } } class_store_columntitle(titles,"d_dcdm",pba->has_dcdm); class_store_columntitle(titles,"d_dr",pba->has_dr); class_store_columntitle(titles,"d_scf",pba->has_scf); class_store_columntitle(titles,"d_tot",_TRUE_); } if (ppt->has_velocity_transfers == _TRUE_) { class_store_columntitle(titles,"t_g",_TRUE_); class_store_columntitle(titles,"t_b",_TRUE_); class_store_columntitle(titles,"t_cdm",((pba->has_cdm == _TRUE_) && (ppt->gauge != synchronous))); class_store_columntitle(titles,"t_fld",pba->has_fld); class_store_columntitle(titles,"t_ur",pba->has_ur); if (pba->has_ncdm == _TRUE_) { for (n_ncdm=0; n_ncdm < pba->N_ncdm; n_ncdm++) { sprintf(tmp,"t_ncdm[%d]",n_ncdm); class_store_columntitle(titles,tmp,_TRUE_); } } class_store_columntitle(titles,"t_dcdm",pba->has_dcdm); 
class_store_columntitle(titles,"t_dr",pba->has_dr); class_store_columntitle(titles,"t__scf",pba->has_scf); class_store_columntitle(titles,"t_tot",_TRUE_); } } else if (output_format == camb_format) { class_store_columntitle(titles,"k (h/Mpc)",_TRUE_); class_store_columntitle(titles,"-T_cdm/k2",_TRUE_); class_store_columntitle(titles,"-T_b/k2",_TRUE_); class_store_columntitle(titles,"-T_g/k2",_TRUE_); class_store_columntitle(titles,"-T_ur/k2",_TRUE_); class_store_columntitle(titles,"-T_ncdm/k2",_TRUE_); class_store_columntitle(titles,"-T_tot/k2",_TRUE_); } return _SUCCESS_; } int spectra_output_tk_data( struct background * pba, struct perturbs * ppt, struct spectra * psp, enum file_format output_format, double z, int number_of_titles, double *data ) { int n_ncdm; double k, k_over_h, k2; double * tkfull=NULL; /* array with argument pk_ic[(index_k * psp->ic_size[index_md] + index_ic)*psp->tr_size+index_tr] */ double *tk; double *dataptr; int index_md=0; int index_ic; int index_k; int index_tr; int storeidx; if (psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size > 0){ class_alloc(tkfull, psp->ln_k_size*psp->ic_size[index_md]*psp->tr_size*sizeof(double), psp->error_message); } /** - compute T_i(k) for each k (if several ic's, compute it for each ic; if z_pk = 0, this is done by directly reading inside the pre-computed table; if not, this is done by interpolating the table at the correct value of tau. */ /* if z_pk = 0, no interpolation needed */ if (z == 0.) 
    {
    /* z = 0: copy the last time slice (index_tau = ln_tau_size-1) straight
       out of the pre-computed table */
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {
      for (index_tr=0; index_tr<psp->tr_size; index_tr++) {
        for (index_ic=0; index_ic<psp->ic_size[index_md]; index_ic++) {
          tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr] =
            psp->matter_transfer[(((psp->ln_tau_size-1)*psp->ln_k_size + index_k) * psp->ic_size[index_md] + index_ic) * psp->tr_size + index_tr];
        }
      }
    }
  }

  /* if 0 <= z_pk <= z_max_pk, interpolation needed, */
  else {
    class_call(spectra_tk_at_z(pba,
                               psp,
                               z,
                               tkfull),
               psp->error_message,
               psp->error_message);
  }

  /** - store data */
  for (index_ic = 0; index_ic < psp->ic_size[index_md]; index_ic++) {
    for (index_k=0; index_k<psp->ln_k_size; index_k++) {

      storeidx = 0;
      /* row of the output array for this (ic, k) pair */
      dataptr = data+index_ic*(psp->ln_k_size*number_of_titles)+index_k*number_of_titles;
      /* slice of tkfull holding all transfer columns of this (k, ic) */
      tk = &(tkfull[(index_k * psp->ic_size[index_md] + index_ic) * psp->tr_size]);
      k = exp(psp->ln_k[index_k]);
      k2 = k*k;
      k_over_h = k/pba->h;

      class_store_double(dataptr, k_over_h, _TRUE_,storeidx);

      /* indices for species associated with a velocity transfer function in Fourier space */

      if (output_format == class_format) {

        if (ppt->has_density_transfers == _TRUE_) {
          class_store_double(dataptr,tk[psp->index_tr_delta_g],ppt->has_source_delta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_b],ppt->has_source_delta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_cdm],ppt->has_source_delta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_fld],ppt->has_source_delta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_ur],ppt->has_source_delta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_delta_ncdm1+n_ncdm],ppt->has_source_delta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_delta_dcdm],ppt->has_source_delta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_dr],ppt->has_source_delta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_scf],ppt->has_source_delta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_delta_tot],_TRUE_,storeidx);
        }
        if (ppt->has_velocity_transfers == _TRUE_) {
          class_store_double(dataptr,tk[psp->index_tr_theta_g],ppt->has_source_theta_g,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_b],ppt->has_source_theta_b,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_cdm],ppt->has_source_theta_cdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_fld],ppt->has_source_theta_fld,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_ur],ppt->has_source_theta_ur,storeidx);
          if (pba->has_ncdm == _TRUE_){
            for (n_ncdm = 0; n_ncdm < pba->N_ncdm; n_ncdm++){
              class_store_double(dataptr,tk[psp->index_tr_theta_ncdm1+n_ncdm],ppt->has_source_theta_ncdm,storeidx);
            }
          }
          class_store_double(dataptr,tk[psp->index_tr_theta_dcdm],ppt->has_source_theta_dcdm,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_dr],ppt->has_source_theta_dr,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_scf],ppt->has_source_theta_scf,storeidx);
          class_store_double(dataptr,tk[psp->index_tr_theta_tot],_TRUE_,storeidx);
        }

      }
      else if (output_format == camb_format) {

        /* rescale and reorder the matter transfer functions following the CMBFAST/CAMB convention */
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_cdm]/k2,ppt->has_source_delta_cdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_b]/k2,ppt->has_source_delta_b,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_g]/k2,ppt->has_source_delta_g,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ur]/k2,ppt->has_source_delta_ur,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_ncdm1]/k2,ppt->has_source_delta_ncdm,storeidx,0.0);
        class_store_double_or_default(dataptr,-tk[psp->index_tr_delta_tot]/k2,_TRUE_,storeidx,0.0);
      }
    }
  }

  /* Necessary because the size could be zero (if psp->tr_size is zero);
     note free(NULL) would be a no-op anyway, so the guard is redundant
     but harmless */
  if (tkfull != NULL)
    free(tkfull);

  return _SUCCESS_;
}

/**
 * Produce, for a given initial-condition index, the one-line description
 * written at the top of the output file (\p first_line) and the short
 * file-name suffix (\p ic_suffix, e.g. "ad", "bi", ...). Both strings are
 * emptied first, so an unrecognized index yields empty strings.
 */
int spectra_firstline_and_ic_suffix(struct perturbs *ppt,
                                    int index_ic,
                                    char first_line[_LINE_LENGTH_MAX_],
                                    FileName ic_suffix){

  first_line[0]='\0';
  ic_suffix[0]='\0';

  if ((ppt->has_ad == _TRUE_) && (index_ic == ppt->index_ic_ad)) {
    strcpy(ic_suffix,"ad");
    strcpy(first_line,"for adiabatic (AD) mode (normalized to initial curvature=1) ");
  }

  if ((ppt->has_bi == _TRUE_) && (index_ic == ppt->index_ic_bi)) {
    strcpy(ic_suffix,"bi");
    strcpy(first_line,"for baryon isocurvature (BI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_cdi == _TRUE_) && (index_ic == ppt->index_ic_cdi)) {
    strcpy(ic_suffix,"cdi");
    strcpy(first_line,"for CDM isocurvature (CDI) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_nid == _TRUE_) && (index_ic == ppt->index_ic_nid)) {
    strcpy(ic_suffix,"nid");
    strcpy(first_line,"for neutrino density isocurvature (NID) mode (normalized to initial entropy=1)");
  }

  if ((ppt->has_niv == _TRUE_) && (index_ic == ppt->index_ic_niv)) {
    strcpy(ic_suffix,"niv");
    strcpy(first_line,"for neutrino velocity isocurvature (NIV) mode (normalized to initial entropy=1)");
  }

  return _SUCCESS_;
}
trmv_x_csc_u_hi_trans.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <memory.h>

/*
 * Triangular matrix-vector product y = alpha * op(A) * x + beta * y for a
 * CSC matrix, unit diagonal, upper triangle, transposed operator — i.e. for
 * each column i of A, accumulate the strictly-upper entries (row < i)
 * against x, add the implicit unit-diagonal contribution x[i], then scale.
 * Processes columns in [lrs, lre); inner loop 4-way unrolled into four
 * independent accumulators to expose ILP.
 *
 * NOTE(review): the unrolled branch ladder (testing row_ind3, then
 * row_ind2, ...) assumes row indices within a column are sorted in
 * ascending order — confirm against the matrix builder.
 */
static alphasparse_status_t
trmv_csc_u_hi_trans_unroll4(const ALPHA_Number alpha,
                            const ALPHA_SPMAT_CSC* A,
                            const ALPHA_Number* x,
                            const ALPHA_Number beta,
                            ALPHA_Number* y,
                            ALPHA_INT lrs,
                            ALPHA_INT lre)
{
    ALPHA_INT m = A->cols;  /* (unused) */
    for (ALPHA_INT i = lrs; i < lre; i++)
    {
        /* four partial sums, merged at the end */
        register ALPHA_Number tmp0;
        register ALPHA_Number tmp1;
        register ALPHA_Number tmp2;
        register ALPHA_Number tmp3;
        alpha_setzero(tmp0);
        alpha_setzero(tmp1);
        alpha_setzero(tmp2);
        alpha_setzero(tmp3);
        ALPHA_INT pks = A->cols_start[i];
        ALPHA_INT pke = A->cols_end[i];
        ALPHA_INT pkl = pke - pks;      /* nnz in column i */
        ALPHA_INT pkl4 = pkl - 4;       /* unroll bound (remainder handled below) */
        ALPHA_INT row_ind0, row_ind1, row_ind2, row_ind3;
        ALPHA_Number *A_val = &A->values[pks];
        ALPHA_INT *A_row = &A->row_indx[pks];
        ALPHA_INT pi;
        for (pi = 0; pi < pkl4; pi += 4)
        {
            row_ind0 = A_row[pi];
            row_ind1 = A_row[pi + 1];
            row_ind2 = A_row[pi + 2];
            row_ind3 = A_row[pi + 3];
            /* keep only entries strictly above the diagonal (row < i);
               which tmp each product lands in is arbitrary since all four
               are summed together afterwards */
            if (row_ind3 < i){
                alpha_madde(tmp0, A_val[pi], x[row_ind0]);
                alpha_madde(tmp1, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp2, A_val[pi+2], x[row_ind2]);
                alpha_madde(tmp3, A_val[pi+3], x[row_ind3]);
            }else if (row_ind2 < i){
                alpha_madde(tmp1, A_val[pi], x[row_ind0]);
                alpha_madde(tmp2, A_val[pi+1], x[row_ind1]);
                alpha_madde(tmp3, A_val[pi+2], x[row_ind2]);
            }else if (row_ind1 < i){
                alpha_madde(tmp2, A_val[pi], x[row_ind0]);
                alpha_madde(tmp3, A_val[pi+1], x[row_ind1]);
            }else if (row_ind0 < i){
                alpha_madde(tmp3, A_val[pi], x[row_ind0]);
            }
        }
        /* scalar remainder loop */
        for (; pi < pkl; pi += 1)
        {
            if (A_row[pi] < i)
            {
                alpha_madde(tmp0, A_val[pi], x[A_row[pi]]);
            }
        }
        /* merge partial sums, add unit-diagonal term, apply alpha/beta */
        alpha_add(tmp0, tmp0, tmp1);
        alpha_add(tmp2, tmp2, tmp3);
        alpha_add(tmp0, tmp0, tmp2);
        alpha_add(tmp0, tmp0, x[i]);
        alpha_mul(tmp0, tmp0, alpha);
        alpha_mul(tmp1, beta, y[i]);
        alpha_add(y[i], tmp0, tmp1);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/*
 * OpenMP driver: partition columns by nnz so each thread gets a balanced
 * share, then run the unrolled kernel on each thread's column range.
 * Column ranges are disjoint, so writes to y never race.
 */
static alphasparse_status_t
trmv_csc_u_hi_trans_omp(const ALPHA_Number alpha,
                        const ALPHA_SPMAT_CSC* A,
                        const ALPHA_Number* x,
                        const ALPHA_Number beta,
                        ALPHA_Number* y)
{
    ALPHA_INT n = A->cols;
    ALPHA_INT num_threads = alpha_get_thread_num();
    ALPHA_INT partition[num_threads + 1];
    balanced_partition_row_by_nnz(A->cols_end, n, num_threads, partition);

#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
    {
        ALPHA_INT tid = alpha_get_thread_id();
        ALPHA_INT local_n_s = partition[tid];
        ALPHA_INT local_n_e = partition[tid + 1];
        trmv_csc_u_hi_trans_unroll4(alpha,A,x,beta,y,local_n_s,local_n_e);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/* Public entry point (name generated via ONAME macro). */
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
      const ALPHA_SPMAT_CSC *A,
      const ALPHA_Number *x,
      const ALPHA_Number beta,
      ALPHA_Number *y)
{
    return trmv_csc_u_hi_trans_omp(alpha, A, x, beta, y);
}
HardTanh.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/HardTanh.c"
#else

/*
 * HardTanh forward: clamp every input element to [-1, 1].
 * Slow path (non-contiguous or 1-D tensors) uses the generic
 * TH_TENSOR_APPLY2 iterator; fast path assumes a contiguous tensor and
 * walks raw data pointers, parallelized over the leading dimension
 * (for a contiguous tensor, size[0] * stride[0] covers all elements).
 */
static int nn_(HardTanh_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_(Tensor_id));

  THTensor_(resizeAs)(output, input);

  if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
  {
    TH_TENSOR_APPLY2(real, output, real, input,      \
                     if(*input_data < -1)            \
                       *output_data = -1;            \
                     else if(*input_data <= 1)       \
                       *output_data = *input_data;   \
                     else                            \
                       *output_data = 1;);
  }
  else
  {
    real* output_data = THTensor_(data)(output);
    real* input_data  = THTensor_(data)(input);

    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < input->size[0]; k++)
    {
      real* ptr_output = output_data + k*input->stride[0];
      real* ptr_input  = input_data  + k*input->stride[0];
      long i;
      for (i = 0; i < input->stride[0]; i++)
      {
        if(ptr_input[i] < -1)
          ptr_output[i] = -1;
        else if (ptr_input[i] <= 1)
          ptr_output[i] = ptr_input[i];
        else
          ptr_output[i] = 1;
      }
    }
  }
  return 1;
}

/*
 * HardTanh backward: pass the upstream gradient through where the input
 * lies inside [-1, 1], zero it outside (the clamp is flat there).
 * Same slow/fast path split as the forward.
 */
static int nn_(HardTanh_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_(Tensor_id));
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_(Tensor_id));
  THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_(Tensor_id));

  THTensor_(resizeAs)(gradInput, input);

  if (input->nDimension == 1 ||
      !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,  \
                     if(*input_data < -1 || *input_data > 1)          \
                       *gradInput_data = 0;                           \
                     else                                             \
                       *gradInput_data = *gradOutput_data;);
  }
  else
  {
    real* gradOutput_data = THTensor_(data)(gradOutput);
    real* gradInput_data  = THTensor_(data)(gradInput);
    real* input_data      = THTensor_(data)(input);

    long k;
#pragma omp parallel for private(k)
    for (k = 0; k < input->size[0]; k++)
    {
      real* ptr_gradOutput = gradOutput_data + k*input->stride[0];
      real* ptr_gradInput  = gradInput_data  + k*input->stride[0];
      real* ptr_input      = input_data      + k*input->stride[0];
      long i;
      for (i = 0; i < input->stride[0]; i++)
      {
        if(ptr_input[i] < -1 || ptr_input[i] > 1)
          ptr_gradInput[i] = 0;
        else
          ptr_gradInput[i] = ptr_gradOutput[i];
      }
    }
  }
  return 1;
}

/* Lua method table for this module. */
static const struct luaL_Reg nn_(HardTanh__) [] = {
  {"HardTanh_updateOutput", nn_(HardTanh_updateOutput)},
  {"HardTanh_updateGradInput", nn_(HardTanh_updateGradInput)},
  {NULL, NULL}
};

/* Register the methods on the Tensor metaclass under the "nn" namespace. */
static void nn_(HardTanh_init)(lua_State *L)
{
  luaT_pushmetaclass(L, torch_(Tensor_id));
  luaT_registeratname(L, nn_(HardTanh__), "nn");
  lua_pop(L,1);
}

#endif
fill_ints.c
/* * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <string.h> #include <complex.h> #include <assert.h> #include "config.h" #include "cint.h" #include "vhf/fblas.h" #define INTBUFMAX 1000 #define INTBUFMAX10 8000 #define IMGBLK 80 #define OF_CMPLX 2 #define MIN(X,Y) ((X)<(Y)?(X):(Y)) #define MAX(X,Y) ((X)>(Y)?(X):(Y)) int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); static int shloc_partition(int *kshloc, int *ao_loc, int ksh0, int ksh1, int dkmax) { int ksh; int nloc = 0; int loclast = ao_loc[ksh0]; kshloc[0] = ksh0; for (ksh = ksh0+1; ksh < ksh1; ksh++) { assert(ao_loc[ksh+1] - ao_loc[ksh] < dkmax); if (ao_loc[ksh+1] - loclast > dkmax) { nloc += 1; kshloc[nloc] = ksh; loclast = ao_loc[ksh]; } } nloc += 1; kshloc[nloc] = ksh1; return nloc; } static void shift_bas(double *env_loc, double *env, double *Ls, int ptr, int iL) { env_loc[ptr+0] = env[ptr+0] + Ls[iL*3+0]; env_loc[ptr+1] = env[ptr+1] + Ls[iL*3+1]; env_loc[ptr+2] = env[ptr+2] + Ls[iL*3+2]; } static void sort3c_kks1(double complex *out, double *bufr, double *bufi, int *kptij_idx, int *shls_slice, int *ao_loc, int nkpts, int nkpts_ij, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * 
dkmax * comp;
        out += (ip * naoj + jp) * naok;

        int i, j, k, kk, ik, jk, ksh, ic, dk, dijk;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;
        /* one pass per requested k-point pair; buffers are laid out as
           [ik*nkpts+jk][shell-k][comp][k][i*j] and transposed here into
           the [i][j][k] AO layout of out */
        for (kk = 0; kk < nkpts_ij; kk++) {
                ik = kptij_idx[kk] / nkpts;
                jk = kptij_idx[kk] % nkpts;
                off = (ik*nkpts+jk) * dijmc;
                for (ksh = msh0; ksh < msh1; ksh++) {
                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                        dijk = dij * dk;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0];
                                pbr = bufr + off + dijk*ic;
                                pbi = bufi + off + dijk*ic;
                                for (j = 0; j < dj; j++) {
                                        for (k = 0; k < dk; k++) {
                                                for (i = 0; i < di; i++) {
                                                        pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I;
                                                }
                                        }
                                        pout += naok;
                                        pbr += di;
                                        pbi += di;
                                }
                        }
                        off += dijk * comp;
                }
                out += nijk * comp;
        }
}

/*
 * Core driver for the kk-indexed 3-center integrals: for each pair of
 * lattice translations (iL, jL), shift the two shell centers, evaluate the
 * integrals over each k-shell segment, then contract the image dimensions
 * against the phase-factor matrices (expkL_r/expkL_i, split real/imag)
 * with dgemm, accumulating into per-(k,k') buffers. The per-image loop is
 * blocked by IMGBLK to bound the intermediate buffer. Finally fsort
 * scatters the accumulated real/imag buffers into the complex output.
 */
static void _nr3c_fill_kk(int (*intor)(), void (*fsort)(),
                          double complex *out, int nkpts_ij,
                          int nkpts, int comp, int nimgs, int ish, int jsh,
                          double *buf, double *env_loc, double *Ls,
                          double *expkL_r, double *expkL_i, int *kptij_idx,
                          int *shls_slice, int *ao_loc,
                          CINTOpt *cintopt,
                          int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int jsh0 = shls_slice[2];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];

        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const double ND1 = -1;

        jsh += jsh0;
        ish += ish0;
        int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS];
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        /* segment the k shells so each integral batch fits in INTBUFMAX */
        int dkmax = INTBUFMAX / dij;
        int kshloc[ksh1-ksh0+1];
        int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax);

        int i, m, msh0, msh1, dijm, dijmk;
        int ksh, dk, iL0, iL1, iL, jL, iLcount, empty;
        int shls[3];
        /* buf is carved into: bufkk_{r,i} (k,k' accumulators),
           bufkL_{r,i} (per-image-block intermediates), bufL (raw
           integrals per jL image), cache (libcint scratch) */
        double *bufkk_r, *bufkk_i, *bufkL_r, *bufkL_i, *bufL, *pbuf, *cache;
        shls[0] = ish;
        shls[1] = jsh;

        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijm = dij * dkmax * comp;
                dijmk = dijm * nkpts;
                bufkk_r = buf;
                bufkk_i = bufkk_r + (size_t)nkpts * dijmk;
                bufkL_r = bufkk_i + (size_t)nkpts * dijmk;
                bufkL_i = bufkL_r + (size_t)MIN(nimgs,IMGBLK) * dijmk;
                bufL    = bufkL_i + (size_t)MIN(nimgs,IMGBLK) * dijmk;
                cache   = bufL    + (size_t)nimgs * dijm;
                /* zero both real and imaginary accumulators */
                for (i = 0; i < nkpts*dijmk*OF_CMPLX; i++) {
                        bufkk_r[i] = 0;
                }

                for (iL0 = 0; iL0 < nimgs; iL0+=IMGBLK) {
                        iLcount = MIN(IMGBLK, nimgs - iL0);
                        for (iL = iL0; iL < iL0+iLcount; iL++) {
                                shift_bas(env_loc, env, Ls, iptrxyz, iL);
                                pbuf = bufL;
                                for (jL = 0; jL < nimgs; jL++) {
                                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                        for (ksh = msh0; ksh < msh1; ksh++) {
                                                shls[2] = ksh;
                                                if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas,
                                                             env_loc, cintopt, cache)) {
                                                        empty = 0;
                                                }
                                                dk = ao_loc[ksh+1] - ao_loc[ksh];
                                                pbuf += dij*dk * comp;
                                        }
                                }
                                /* contract the jL image index against exp(ik'L) */
                                dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
                                       &D1, bufL, &dijm, expkL_r, &nimgs,
                                       &D0, bufkL_r+(iL-iL0)*(size_t)dijmk, &dijm);
                                dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs,
                                       &D1, bufL, &dijm, expkL_i, &nimgs,
                                       &D0, bufkL_i+(iL-iL0)*(size_t)dijmk, &dijm);
                        } // iL in range(0, nimgs)
                        /* contract the iL image index against the complex
                           conjugate phase: conj(exp(1j*dot(h,k))) — hence the
                           sign pattern (r*r + i*i, i*r - r*i) below */
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_r, &dijmk, expkL_r+iL0, &nimgs,
                               &D1, bufkk_r, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_i, &dijmk, expkL_i+iL0, &nimgs,
                               &D1, bufkk_r, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &D1, bufkL_i, &dijmk, expkL_r+iL0, &nimgs,
                               &D1, bufkk_i, &dijmk);
                        dgemm_(&TRANS_N, &TRANS_N, &dijmk, &nkpts, &iLcount,
                               &ND1, bufkL_r, &dijmk, expkL_i+iL0, &nimgs,
                               &D1, bufkk_i, &dijmk);
                }
                (*fsort)(out, bufkk_r, bufkk_i, kptij_idx, shls_slice, ao_loc,
                         nkpts, nkpts_ij, comp, ish, jsh, msh0, msh1);
        }
}

/* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */
void PBCnr3c_fill_kks1(int (*intor)(), double complex *out,
                       int nkpts_ij, int nkpts, int comp, int nimgs,
                       int ish, int jsh,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i, int *kptij_idx,
                       int *shls_slice, int *ao_loc,
                       CINTOpt
*cintopt, int *atm, int natm, int *bas, int nbas, double *env) { _nr3c_fill_kk(intor, &sort3c_kks1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } static void sort3c_kks2_igtj(double complex *out, double *bufr, double *bufi, int *kptij_idx, int *shls_slice, int *ao_loc, int nkpts, int nkpts_ij, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; assert(naoi == naoj); const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; double complex *outij = out + (ip * naoj + jp) * naok; double complex *outji = out + (jp * naoj + ip) * naok; int i, j, k, kk, ik, jk, ksh, ic, dk, dijk; size_t offij, offji; double *pbij_r, *pbij_i, *pbji_r, *pbji_i; double complex *poutij, *poutji; for (kk = 0; kk < nkpts_ij; kk++) { ik = kptij_idx[kk] / nkpts; jk = kptij_idx[kk] % nkpts; offij = (ik*nkpts+jk) * dijmc; offji = (jk*nkpts+ik) * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { poutij = outij + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; poutji = outji + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbij_r = bufr + offij + dijk*ic; pbij_i = bufi + offij + dijk*ic; pbji_r = bufr + offji + dijk*ic; pbji_i = bufi + offji + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; 
i++) { poutij[i*njk +k] = pbij_r[k*dij+i] + pbij_i[k*dij+i]*_Complex_I; poutji[i*naok+k] = pbji_r[k*dij+i] - pbji_i[k*dij+i]*_Complex_I; } } poutij += naok; poutji += njk; pbij_r += di; pbij_i += di; pbji_r += di; pbji_i += di; } } offij += dijk * comp; offji += dijk * comp; } outij += nijk * comp; outji += nijk * comp; } } /* ('...LM,kL,lM->...kl', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_kks2(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { int ip = ish + shls_slice[0]; int jp = jsh + shls_slice[2] - nbas; if (ip > jp) { _nr3c_fill_kk(intor, &sort3c_kks2_igtj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } else if (ip == jp) { _nr3c_fill_kk(intor, &sort3c_kks1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } } static void sort3c_ks1(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const 
size_t dijmc = dij * dkmax * comp; out += (ip * naoj + jp) * naok; int i, j, k, kk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (j = 0; j < dj; j++) { for (k = 0; k < dk; k++) { for (i = 0; i < di; i++) { pout[i*njk+k] = pbr[k*dij+i] + pbi[k*dij+i]*_Complex_I; } } pout += naok; pbr += di; pbi += di; } } off += dijk * comp; } out += nijk * comp; } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ static void _nr3c_fill_k(int (*intor)(), void (*fsort)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const char TRANS_N = 'N'; const double D1 = 1; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS]; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; int dkmax = INTBUFMAX10 / dij; int kshloc[ksh1-ksh0+1]; int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax); int i, j, m, msh0, msh1, dijm; size_t dijmk; int ksh, dk, iL0, iL1, iL, jL, iLcount, empty; int shls[3]; double *bufexp_r = buf; double *bufexp_i = bufexp_r + nimgs * nkpts; double *bufk_r = bufexp_i + nimgs * nkpts; double *bufk_i, *bufL, *pbuf, *cache; shls[0] = ish; shls[1] = jsh; for (m = 0; m < nkshloc; m++) { msh0 = kshloc[m]; msh1 = kshloc[m+1]; dkmax = ao_loc[msh1] - 
ao_loc[msh0]; dijm = dij * dkmax * comp; dijmk = dijm * nkpts; bufk_i = bufk_r + dijmk; bufL = bufk_i + dijmk; cache = bufL + nimgs * dijm; for (i = 0; i < dijmk*OF_CMPLX; i++) { bufk_r[i] = 0; } for (iL = 0; iL < nimgs; iL++) { shift_bas(env_loc, env, Ls, iptrxyz, iL); pbuf = bufL; for (jL = 0; jL < nimgs; jL++) { shift_bas(env_loc, env, Ls, jptrxyz, jL); for (ksh = msh0; ksh < msh1; ksh++) { shls[2] = ksh; if ((*intor)(pbuf, NULL, shls, atm, natm, bas, nbas, env_loc, cintopt, cache)) { empty = 0; } dk = ao_loc[ksh+1] - ao_loc[ksh]; pbuf += dij*dk * comp; } } // ('k,kL->kL', conj(expkL[iL]), expkL) for (i = 0; i < nkpts; i++) { for (j = 0; j < nimgs; j++) { bufexp_r[i*nimgs+j] = expkL_r[i*nimgs+j] * expkL_r[i*nimgs+iL]; bufexp_r[i*nimgs+j]+= expkL_i[i*nimgs+j] * expkL_i[i*nimgs+iL]; bufexp_i[i*nimgs+j] = expkL_i[i*nimgs+j] * expkL_r[i*nimgs+iL]; bufexp_i[i*nimgs+j]-= expkL_r[i*nimgs+j] * expkL_i[i*nimgs+iL]; } } dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs, &D1, bufL, &dijm, bufexp_r, &nimgs, &D1, bufk_r, &dijm); dgemm_(&TRANS_N, &TRANS_N, &dijm, &nkpts, &nimgs, &D1, bufL, &dijm, bufexp_i, &nimgs, &D1, bufk_i, &dijm); } // iL in range(0, nimgs) (*fsort)(out, bufk_r, bufk_i, shls_slice, ao_loc, nkpts, comp, ish, jsh, msh0, msh1); } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_ks1(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { _nr3c_fill_k(intor, sort3c_ks1, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } static void sort3c_ks2_igtj(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = 
shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, kk, ik, jk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I; } } pout += (i+ao_loc[ish]+1) * naok; } } off += dijk * comp; } out += nijk * comp; } } static void sort3c_ks2_ieqj(double complex *out, double *bufr, double *bufi, int *shls_slice, int *ao_loc, int nkpts, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2; const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0; const size_t nijk = nij * naok; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; const int dkmax = 
ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok; int i, j, k, ij, kk, ik, jk, ksh, ic, dk, dijk; size_t off; double *pbr, *pbi; double complex *pout; for (kk = 0; kk < nkpts; kk++) { off = kk * dijmc; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk*ic + ao_loc[ksh]-ao_loc[ksh0]; pbr = bufr + off + dijk*ic; pbi = bufi + off + dijk*ic; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { ij = j * di + i; for (k = 0; k < dk; k++) { pout[j*naok+k] = pbr[k*dij+ij] + pbi[k*dij+ij]*_Complex_I; } } pout += (i+ao_loc[ish]+1) * naok; } } off += dijk * comp; } out += nijk * comp; } } /* ('...LM,kL,kM->...k', int3c, exp_kL, exp_kL) */ void PBCnr3c_fill_ks2(int (*intor)(), double complex *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { int ip = ish + shls_slice[0]; int jp = jsh + shls_slice[2] - nbas; if (ip > jp) { _nr3c_fill_k(intor, &sort3c_ks2_igtj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } else if (ip == jp) { _nr3c_fill_k(intor, &sort3c_ks2_ieqj, out, nkpts_ij, nkpts, comp, nimgs, ish, jsh, buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } } static void sort3c_gs1(double *out, double *in, int *shls_slice, int *ao_loc, int comp, int ish, int jsh, int msh0, int msh1) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naoi = ao_loc[ish1] - 
ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t njk = naoj * naok; const size_t nijk = njk * naoi; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; const int dij = di * dj; const int dkmax = ao_loc[msh1] - ao_loc[msh0]; const size_t dijmc = dij * dkmax * comp; out += (ip * naoj + jp) * naok; int i, j, k, ij, ksh, ic, dk, dijk; double *pin, *pout; for (ksh = msh0; ksh < msh1; ksh++) { dk = ao_loc[ksh+1] - ao_loc[ksh]; dijk = dij * dk; for (ic = 0; ic < comp; ic++) { pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0]; pin = in + dijk * ic; for (j = 0; j < dj; j++) { for (i = 0; i < di; i++) { for (k = 0; k < dk; k++) { pout[i*njk+k] = pin[k*dij+i]; } } pout += naok; pin += di; } } in += dijk * comp; } } static void _nr3c_fill_g(int (*intor)(), void (*fsort)(), double *out, int nkpts_ij, int nkpts, int comp, int nimgs, int ish, int jsh, double *buf, double *env_loc, double *Ls, double *expkL_r, double *expkL_i, int *kptij_idx, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; jsh += jsh0; ish += ish0; int iptrxyz = atm[PTR_COORD+bas[ATOM_OF+ish*BAS_SLOTS]*ATM_SLOTS]; int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS]; const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int dij = di * dj; int dkmax = INTBUFMAX10 / dij / 2 * MIN(IMGBLK,nimgs); int kshloc[ksh1-ksh0+1]; int nkshloc = shloc_partition(kshloc, ao_loc, ksh0, ksh1, dkmax); int i, j, m, msh0, msh1, dijm; int ksh, dk, iL, jL, dijkc; int shls[3]; int dijmc = dij * dkmax * comp; double *bufL = buf + dijmc; double *cache = bufL + dijmc; 
double *pbuf;

        shls[0] = ish;
        shls[1] = jsh;
        /* loop over the k-shell blocks produced by shloc_partition */
        for (m = 0; m < nkshloc; m++) {
                msh0 = kshloc[m];
                msh1 = kshloc[m+1];
                dkmax = ao_loc[msh1] - ao_loc[msh0];
                dijmc = dij * dkmax * comp;
                /* clear the accumulator for this k block */
                for (i = 0; i < dijmc; i++) {
                        bufL[i] = 0;
                }

                /* sum integrals over every pair of lattice images */
                for (iL = 0; iL < nimgs; iL++) {
                        shift_bas(env_loc, env, Ls, iptrxyz, iL);
                        for (jL = 0; jL < nimgs; jL++) {
                                shift_bas(env_loc, env, Ls, jptrxyz, jL);
                                pbuf = bufL;
                                for (ksh = msh0; ksh < msh1; ksh++) {
                                        shls[2] = ksh;
                                        dk = ao_loc[ksh+1] - ao_loc[ksh];
                                        dijkc = dij*dk * comp;
                                        /* intor returns nonzero when the
                                         * shell triple is not screened out */
                                        if ((*intor)(buf, NULL, shls,
                                                     atm, natm, bas, nbas,
                                                     env_loc, cintopt, cache)) {
                                                for (i = 0; i < dijkc; i++) {
                                                        pbuf[i] += buf[i];
                                                }
                                        }
                                        pbuf += dijkc;
                                }
                        }
                } // iL in range(0, nimgs)
                /* scatter the accumulated k block into the final layout */
                (*fsort)(out, bufL, shls_slice, ao_loc, comp,
                         ish, jsh, msh0, msh1);
        }
}

/* ('...LM->...', int3c) */
void PBCnr3c_fill_gs1(int (*intor)(), double *out, int nkpts_ij, int nkpts,
                      int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        /* s1 (no permutation symmetry): scatter with sort3c_gs1 */
        _nr3c_fill_g(intor, &sort3c_gs1, out,
                     nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                     buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                     shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}

/*
 * Scatter one shell-pair block into an s2 (packed triangular ij) output,
 * case ish > jsh: all dj columns of each row are written.
 */
static void sort3c_gs2_igtj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        double *out0 = out;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        /* nij counts the packed (i,j) pairs i>=j inside the slice */
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int dij = di * dj;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        /* advance out to the packed row of this shell pair */
        out +=
(ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        /* continuation of sort3c_gs2_igtj: scatter each di*dj tile into the
         * packed-triangular output; row length grows with the absolute i. */
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;
        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j < dj; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                /* next packed row: (absolute i)+1 entries */
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}

/*
 * Scatter one shell-pair block into the s2 packed output, diagonal case
 * ish == jsh: only the lower triangle j <= i of each di*di tile is kept.
 */
static void sort3c_gs2_ieqj(double *out, double *in, int *shls_slice,
                            int *ao_loc, int comp,
                            int ish, int jsh, int msh0, int msh1)
{
        double *out0 = out;
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
        const size_t off0 = ao_loc[ish0] * (ao_loc[ish0] + 1) / 2;
        const size_t nij = ao_loc[ish1] * (ao_loc[ish1] + 1) / 2 - off0;
        const size_t nijk = nij * naok;
        const int di = ao_loc[ish+1] - ao_loc[ish];
        /* diagonal tile is square: dj == di */
        const int dij = di * di;
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        out += (ao_loc[ish]*(ao_loc[ish]+1)/2-off0 + jp) * naok;
        int i, j, k, ij, ksh, ic, dk, dijk;
        double *pin, *pout;
        for (ksh = msh0; ksh < msh1; ksh++) {
                dk = ao_loc[ksh+1] - ao_loc[ksh];
                dijk = dij * dk;
                for (ic = 0; ic < comp; ic++) {
                        pout = out + nijk * ic + ao_loc[ksh]-ao_loc[ksh0];
                        pin = in + dijk * ic;
                        for (i = 0; i < di; i++) {
                                for (j = 0; j <= i; j++) {
                                        ij = j * di + i;
                                        for (k = 0; k < dk; k++) {
                                                pout[j*naok+k] = pin[k*dij+ij];
                                        }
                                }
                                pout += (i+ao_loc[ish]+1) * naok;
                        }
                }
                in += dijk * comp;
        }
}

/* ('...LM->...', int3c) */
void PBCnr3c_fill_gs2(int (*intor)(), double *out, int nkpts_ij, int nkpts,
                      int comp, int nimgs, int ish, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i, int *kptij_idx,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
int ip = ish + shls_slice[0];
        /* NOTE(review): the `- nbas` offset suggests the j shells are indexed
         * in a second appended copy of the basis — confirm against callers. */
        int jp = jsh + shls_slice[2] - nbas;
        if (ip > jp) {
                _nr3c_fill_g(intor, &sort3c_gs2_igtj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt,
                             atm, natm, bas, nbas, env);
        } else if (ip == jp) {
                _nr3c_fill_g(intor, &sort3c_gs2_ieqj, out,
                             nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                             buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                             shls_slice, ao_loc, cintopt,
                             atm, natm, bas, nbas, env);
        }
        /* ip < jp: nothing written (covered by i>=j packing) */
}

/*
 * Upper bound (in doubles) of the portion of `env` referenced by the shells
 * in shls_slice: atom coordinates, exponents and contraction coefficients.
 * Used to size each thread's private copy of env.
 */
int PBCsizeof_env(int *shls_slice,
                  int *atm, int natm, int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        int ish, ia, np, nc;
        int nenv = 0;
        for (ish = ish0; ish < ish1; ish++) {
                ia = bas[ATOM_OF +ish*BAS_SLOTS];
                /* +3 doubles for the x,y,z coordinates of the atom */
                nenv = MAX(atm[PTR_COORD+ia*ATM_SLOTS]+3, nenv);
                np = bas[NPRIM_OF+ish*BAS_SLOTS];
                nc = bas[NCTR_OF +ish*BAS_SLOTS];
                nenv = MAX(bas[PTR_EXP +ish*BAS_SLOTS]+np, nenv);
                nenv = MAX(bas[PTR_COEFF+ish*BAS_SLOTS]+np*nc, nenv);
        }
        return nenv;
}

/*
 * OpenMP driver for the 3-center fills: splits exp(i k.L) into real and
 * imaginary arrays, sizes the per-thread work buffer according to the
 * selected fill kernel, and dispatches every (ish,jsh) shell pair.
 */
void PBCnr3c_drv(int (*intor)(), void (*fill)(), double complex *eri,
                 int nkpts_ij, int nkpts, int comp, int nimgs,
                 double *Ls, double complex *expkL, int *kptij_idx,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt, int *atm, int natm,
                 int *bas, int nbas, double *env)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int ksh0 = shls_slice[4];
        const int ksh1 = shls_slice[5];
        const int nish = ish1 - ish0;
        const int njsh = jsh1 - jsh0;
        const int nksh = ksh1 - ksh0;
        /* one allocation holds both halves: imag part follows real part */
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }

        /* per-thread buffer size (in doubles) depends on the fill kernel */
        size_t count;
        if (fill == &PBCnr3c_fill_kks1 || fill == &PBCnr3c_fill_kks2) {
                int dijk =(GTOmax_shell_dim(ao_loc, shls_slice+0, 1) *
                           GTOmax_shell_dim(ao_loc, shls_slice+2, 1) *
                           GTOmax_shell_dim(ao_loc, shls_slice+4, 1));
                count = nkpts*nkpts * OF_CMPLX
+ nkpts*MIN(nimgs,IMGBLK) * OF_CMPLX
                      + nimgs;
                // MAX(INTBUFMAX, dijk) to ensure buffer is enough for at least one (i,j,k) shell
                count*= MAX(INTBUFMAX, dijk) * comp;
        } else {
                count = (nkpts * OF_CMPLX + nimgs) * INTBUFMAX10 * comp;
                count+= nimgs * nkpts * OF_CMPLX;
        }
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fill, eri, nkpts_ij, nkpts, comp, nimgs, \
               Ls, expkL_r, expkL_i, kptij_idx, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env, count)
{
        int ish, jsh, ij, i;
        /* each thread gets a private, shiftable copy of env: shift_bas
         * rewrites atom coordinates in it per lattice image */
        int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
        nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
        nenv = MAX(nenv, PBCsizeof_env(shls_slice+4, atm, natm, bas, nbas, env));
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        double *buf = malloc(sizeof(double)*(count+cache_size));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
                ish = ij / njsh;
                jsh = ij % njsh;
                (*fill)(intor, eri, nkpts_ij, nkpts, comp, nimgs, ish, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i, kptij_idx,
                        shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        free(expkL_r);
}

/*
 * Gather one 2-center block per k point: combine the real (bufr) and
 * imaginary (bufi) dgemm results into the complex output, transposing from
 * the per-shell (i,j) integral layout into the global AO layout.
 */
static void sort2c_ks1(double complex *out, double *bufr, double *bufi,
                       int *shls_slice, int *ao_loc, int nkpts, int comp,
                       int jsh, int msh0, int msh1)
{
        const int ish0 = shls_slice[0];
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
        const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
        const size_t nij = naoi * naoj;
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        const int jp = ao_loc[jsh] - ao_loc[jsh0];
        const int dimax = ao_loc[msh1] - ao_loc[msh0];
        const size_t dmjc = dimax * dj * comp;
        out += jp;
        int i, j, kk, ish, ic, di, dij;
        size_t off;
        double *pbr, *pbi;
        double complex *pout;
        for (kk = 0; kk < nkpts; kk++) {
                /* each k point occupies one dmjc-sized stripe of the buffers */
                off = kk * dmjc;
                for (ish = msh0;
ish < msh1; ish++) {
                        di = ao_loc[ish+1] - ao_loc[ish];
                        dij = di * dj;
                        for (ic = 0; ic < comp; ic++) {
                                pout = out + nij*ic + naoj*(ao_loc[ish]-ao_loc[ish0]);
                                pbr = bufr + off + dij*ic;
                                pbi = bufi + off + dij*ic;
                                for (j = 0; j < dj; j++) {
                                        for (i = 0; i < di; i++) {
                                                /* reassemble the complex value
                                                 * from the two real buffers */
                                                pout[i*naoj+j] = pbr[j*di+i] + pbi[j*di+i]*_Complex_I;
                                        }
                                }
                        }
                        off += dij * comp;
                }
                out += nij * comp;
        }
}

/*
 * 2-center fill for one j shell: accumulate raw integrals over all lattice
 * images into bufL, contract with the real/imaginary parts of exp(i k.L)
 * via two dgemm calls, then scatter per k point with sort2c_ks1.
 * `ish0` is an extra offset into the i-shell slice (0 for s1, jsh for s2).
 */
static void _nr2c_fill(int (*intor)(), double complex *out,
                       int nkpts, int comp, int nimgs, int jsh, int ish0,
                       double *buf, double *env_loc, double *Ls,
                       double *expkL_r, double *expkL_i,
                       int *shls_slice, int *ao_loc,
                       CINTOpt *cintopt, int *atm, int natm,
                       int *bas, int nbas, double *env)
{
        const int ish1 = shls_slice[1];
        const int jsh0 = shls_slice[2];
        const char TRANS_N = 'N';
        const double D1 = 1;
        const double D0 = 0;
        ish0 += shls_slice[0];
        jsh += jsh0;
        int jptrxyz = atm[PTR_COORD+bas[ATOM_OF+jsh*BAS_SLOTS]*ATM_SLOTS];
        const int dj = ao_loc[jsh+1] - ao_loc[jsh];
        /* partition the i-shell range so each block fits INTBUFMAX10 */
        int dimax = INTBUFMAX10 / dj;
        int ishloc[ish1-ish0+1];
        int nishloc = shloc_partition(ishloc, ao_loc, ish0, ish1, dimax);

        int i, j, m, msh0, msh1, dmjc, ish, di;
        int jL, empty;
        int shls[2];
        /* buf layout: k-space real part, k-space imag part, per-image
         * accumulator bufL, then integrator scratch cache */
        double *bufk_r = buf;
        double *bufk_i, *bufL, *pbuf, *cache;

        shls[1] = jsh;
        for (m = 0; m < nishloc; m++) {
                msh0 = ishloc[m];
                msh1 = ishloc[m+1];
                dimax = ao_loc[msh1] - ao_loc[msh0];
                dmjc = dj * dimax * comp;
                bufk_i = bufk_r + dmjc * nkpts;
                bufL = bufk_i + dmjc * nkpts;
                cache = bufL + dmjc * nimgs;

                pbuf = bufL;
                for (jL = 0; jL < nimgs; jL++) {
                        shift_bas(env_loc, env, Ls, jptrxyz, jL);
                        for (ish = msh0; ish < msh1; ish++) {
                                shls[0] = ish;
                                di = ao_loc[ish+1] - ao_loc[ish];
                                /* intor writes the block directly into pbuf;
                                 * nonzero return means not screened out */
                                if ((*intor)(pbuf, NULL, shls,
                                             atm, natm, bas, nbas,
                                             env_loc, cintopt, cache)) {
                                        empty = 0;
                                }
                                pbuf += di * dj * comp;
                        }
                }
                /* bufk_{r,i} = bufL . expkL_{r,i}  (dmjc x nimgs x nkpts) */
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_r, &nimgs,
                       &D0, bufk_r, &dmjc);
                dgemm_(&TRANS_N, &TRANS_N, &dmjc, &nkpts, &nimgs,
                       &D1, bufL, &dmjc, expkL_i, &nimgs,
                       &D0, bufk_i, &dmjc);

                sort2c_ks1(out, bufk_r, bufk_i, shls_slice, ao_loc, nkpts,
comp, jsh, msh0, msh1);
        }
}

/* ('...M,kL->...k', int3c, exp_kL, exp_kL) */
void PBCnr2c_fill_ks1(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        /* s1: scan the full i-shell range (extra offset 0) */
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, 0,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}

void PBCnr2c_fill_ks2(int (*intor)(), double complex *out,
                      int nkpts, int comp, int nimgs, int jsh,
                      double *buf, double *env_loc, double *Ls,
                      double *expkL_r, double *expkL_i,
                      int *shls_slice, int *ao_loc,
                      CINTOpt *cintopt, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        /* s2: start the i-shell scan at jsh (triangular coverage) */
        _nr2c_fill(intor, out, nkpts, comp, nimgs, jsh, jsh,
                   buf, env_loc, Ls, expkL_r, expkL_i,
                   shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
}

/*
 * OpenMP driver for the 2-center fills: splits exp(i k.L) into real and
 * imaginary arrays and dispatches one j shell per loop iteration.
 */
void PBCnr2c_drv(int (*intor)(), void (*fill)(), double complex *out,
                 int nkpts, int comp, int nimgs,
                 double *Ls, double complex *expkL,
                 int *shls_slice, int *ao_loc,
                 CINTOpt *cintopt, int *atm, int natm,
                 int *bas, int nbas, double *env)
{
        const int jsh0 = shls_slice[2];
        const int jsh1 = shls_slice[3];
        const int njsh = jsh1 - jsh0;
        /* one allocation holds both halves: imag part follows real part */
        double *expkL_r = malloc(sizeof(double) * nimgs*nkpts * OF_CMPLX);
        double *expkL_i = expkL_r + nimgs*nkpts;
        int i;
        for (i = 0; i < nimgs*nkpts; i++) {
                expkL_r[i] = creal(expkL[i]);
                expkL_i[i] = cimag(expkL[i]);
        }
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 2,
                                                 atm, natm, bas, nbas, env);

#pragma omp parallel default(none) \
        shared(intor, fill, out, nkpts, comp, nimgs, \
               Ls, expkL_r, expkL_i, shls_slice, ao_loc, cintopt, \
               atm, natm, bas, nbas, env)
{
        int jsh;
        /* each thread gets a private, shiftable copy of env */
        int nenv = PBCsizeof_env(shls_slice, atm, natm, bas, nbas, env);
        nenv = MAX(nenv, PBCsizeof_env(shls_slice+2, atm, natm, bas, nbas, env));
        double *env_loc = malloc(sizeof(double)*nenv);
        memcpy(env_loc, env, sizeof(double)*nenv);
        /* k-space real+imag stripes plus one stripe per image */
        size_t count = nkpts * OF_CMPLX + nimgs;
        double *buf = malloc(sizeof(double)*(count*INTBUFMAX10*comp+cache_size));
#pragma omp for schedule(dynamic)
        for (jsh = 0; jsh < njsh; jsh++) {
                (*fill)(intor, out, nkpts, comp, nimgs, jsh,
                        buf, env_loc, Ls, expkL_r, expkL_i,
                        shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
        free(env_loc);
}
        free(expkL_r);
}
// ===== file boundary: ast-dump-openmp-target-parallel.c =====
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
// Verifies the AST shape of '#pragma omp target parallel' (nested captured regions); test() must stay at lines 3-6 so the CHECK source locations below match.
void test() {
#pragma omp target parallel
  ;
}

// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPTargetParallelDirective {{.*}} <line:4:1, col:28>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <col:3>
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-NullStmt {{.*}} <col:3>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'
// CHECK-NEXT: |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | `-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-NullStmt {{.*}} <line:5:3>
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel.c:4:1) *const restrict'